From 009cb059001efd8c855e2edacc3e277139f54700 Mon Sep 17 00:00:00 2001 From: Mixficsol <838844609@qq.com> Date: Mon, 15 Apr 2024 16:15:50 +0800 Subject: [PATCH] remove tcl --- pikiwidbtests.sh | 63 - tests/README.md | 4 - tests/assets/encodings.rdb | Bin 667 -> 0 bytes tests/assets/hash-zipmap.rdb | Bin 35 -> 0 bytes tests/helpers/bg_complex_data.tcl | 10 - tests/helpers/gen_write_load.tcl | 15 - tests/helpers/redis_queue.py | 111 - tests/sentinel/run.tcl | 22 - tests/sentinel/tests/00-base.tcl | 126 - tests/sentinel/tests/01-conf-update.tcl | 39 - tests/sentinel/tests/02-slaves-reconf.tcl | 84 - tests/sentinel/tests/03-runtime-reconf.tcl | 1 - tests/sentinel/tests/04-slave-selection.tcl | 5 - tests/sentinel/tests/05-manual.tcl | 44 - tests/sentinel/tests/includes/init-tests.tcl | 72 - tests/sentinel/tmp/.gitignore | 2 - tests/support/redis.tcl | 294 -- tests/support/server.tcl | 349 --- tests/support/test.tcl | 153 - tests/support/tmpfile.tcl | 15 - tests/support/util.tcl | 400 --- tests/test_helper.tcl | 554 ---- tests/unit/Blpop_Brpop_test.py | 896 ------ tests/unit/acl.tcl | 1135 ------- tests/unit/aofrw.tcl | 210 -- tests/unit/auth.tcl | 43 - tests/unit/basic.tcl | 789 ----- tests/unit/bitops.tcl | 341 -- tests/unit/command.tcl | 12 - tests/unit/dump.tcl | 142 - tests/unit/expire.tcl | 201 -- tests/unit/geo.tcl | 311 -- tests/unit/hyperloglog.tcl | 250 -- tests/unit/introspection.tcl | 59 - tests/unit/keys.tcl | 54 - tests/unit/latency-monitor.tcl | 50 - tests/unit/limits.tcl | 16 - tests/unit/maxmemory.tcl | 152 - tests/unit/memefficiency.tcl | 37 - tests/unit/multi.tcl | 309 -- tests/unit/obuf-limits.tcl | 73 - tests/unit/other.tcl | 245 -- tests/unit/printver.tcl | 6 - tests/unit/protocol.tcl | 117 - tests/unit/pubsub.tcl | 399 --- tests/unit/quit.tcl | 40 - tests/unit/scan.tcl | 286 -- tests/unit/scripting.tcl | 606 ---- tests/unit/slowlog.tcl | 70 - tests/unit/sort.tcl | 311 -- tests/unit/tcl/aof-race.tcl | 35 - tests/unit/tcl/aof.tcl | 236 -- .../unit/tcl/convert-zipmap-hash-on-load.tcl | 35 - tests/unit/tcl/rdb.tcl | 98 - tests/unit/tcl/redis-cli.tcl | 208 -- tests/unit/tcl/replication-2.tcl | 87 - tests/unit/tcl/replication-3.tcl | 101 - tests/unit/tcl/replication-4.tcl | 136 - tests/unit/tcl/replication-psync.tcl | 115 - tests/unit/tcl/replication.tcl | 215 -- tests/unit/type.tcl | 50 - tests/unit/type/hash.tcl | 863 ------ tests/unit/type/list-2.tcl | 47 - tests/unit/type/list-3.tcl | 232 -- tests/unit/type/list-common.tcl | 5 - tests/unit/type/list.tcl | 2431 --------------- tests/unit/type/set.tcl | 1314 -------- tests/unit/type/string.tcl | 674 ---- tests/unit/type/zset.tcl | 2735 ----------------- 69 files changed, 19140 deletions(-) delete mode 100755 pikiwidbtests.sh delete mode 100644 tests/README.md delete mode 100644 tests/assets/encodings.rdb delete mode 100644 tests/assets/hash-zipmap.rdb delete mode 100644 tests/helpers/bg_complex_data.tcl delete mode 100644 tests/helpers/gen_write_load.tcl delete mode 100644 tests/helpers/redis_queue.py delete mode 100644 tests/sentinel/run.tcl delete mode 100644 tests/sentinel/tests/00-base.tcl delete mode 100644 tests/sentinel/tests/01-conf-update.tcl delete mode 100644 tests/sentinel/tests/02-slaves-reconf.tcl delete mode 100644 tests/sentinel/tests/03-runtime-reconf.tcl delete mode 100644 tests/sentinel/tests/04-slave-selection.tcl delete mode 100644 tests/sentinel/tests/05-manual.tcl delete mode 100644 tests/sentinel/tests/includes/init-tests.tcl delete mode 100644 tests/sentinel/tmp/.gitignore delete mode 100644 
tests/support/redis.tcl delete mode 100644 tests/support/server.tcl delete mode 100644 tests/support/test.tcl delete mode 100644 tests/support/tmpfile.tcl delete mode 100644 tests/support/util.tcl delete mode 100644 tests/test_helper.tcl delete mode 100644 tests/unit/Blpop_Brpop_test.py delete mode 100644 tests/unit/acl.tcl delete mode 100644 tests/unit/aofrw.tcl delete mode 100644 tests/unit/auth.tcl delete mode 100644 tests/unit/basic.tcl delete mode 100644 tests/unit/bitops.tcl delete mode 100644 tests/unit/command.tcl delete mode 100644 tests/unit/dump.tcl delete mode 100644 tests/unit/expire.tcl delete mode 100644 tests/unit/geo.tcl delete mode 100755 tests/unit/hyperloglog.tcl delete mode 100644 tests/unit/introspection.tcl delete mode 100644 tests/unit/keys.tcl delete mode 100644 tests/unit/latency-monitor.tcl delete mode 100644 tests/unit/limits.tcl delete mode 100644 tests/unit/maxmemory.tcl delete mode 100644 tests/unit/memefficiency.tcl delete mode 100644 tests/unit/multi.tcl delete mode 100644 tests/unit/obuf-limits.tcl delete mode 100644 tests/unit/other.tcl delete mode 100644 tests/unit/printver.tcl delete mode 100644 tests/unit/protocol.tcl delete mode 100644 tests/unit/pubsub.tcl delete mode 100644 tests/unit/quit.tcl delete mode 100644 tests/unit/scan.tcl delete mode 100644 tests/unit/scripting.tcl delete mode 100644 tests/unit/slowlog.tcl delete mode 100644 tests/unit/sort.tcl delete mode 100644 tests/unit/tcl/aof-race.tcl delete mode 100644 tests/unit/tcl/aof.tcl delete mode 100644 tests/unit/tcl/convert-zipmap-hash-on-load.tcl delete mode 100644 tests/unit/tcl/rdb.tcl delete mode 100644 tests/unit/tcl/redis-cli.tcl delete mode 100644 tests/unit/tcl/replication-2.tcl delete mode 100644 tests/unit/tcl/replication-3.tcl delete mode 100644 tests/unit/tcl/replication-4.tcl delete mode 100644 tests/unit/tcl/replication-psync.tcl delete mode 100644 tests/unit/tcl/replication.tcl delete mode 100644 tests/unit/type.tcl delete mode 100644 tests/unit/type/hash.tcl delete mode 100644 tests/unit/type/list-2.tcl delete mode 100644 tests/unit/type/list-3.tcl delete mode 100644 tests/unit/type/list-common.tcl delete mode 100644 tests/unit/type/list.tcl delete mode 100644 tests/unit/type/set.tcl delete mode 100644 tests/unit/type/string.tcl delete mode 100644 tests/unit/type/zset.tcl diff --git a/pikiwidbtests.sh b/pikiwidbtests.sh deleted file mode 100755 index cc2c228e3..000000000 --- a/pikiwidbtests.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# clear the log file -function cleanup() { - rm -rf ./logs* - rm -rf ./db* - rm -rf dbsync/ - rm src/redis-server -} - -# check if tcl is installed -function check_tcl { - if [ -z "$(which tclsh)" ]; then - echo "tclsh is not installed" - exit 1 - fi -} - -# handle different build directories. -function setup_build_dir { - BUILD_DIR="./bin" - echo "BUILD_DIR: $BUILD_DIR" -} - -# setup pikiwidb bin and conf -function setup_pika_bin { - PIKIWIDB_BIN="./$BUILD_DIR/pikiwidb" - if [ ! -f "$PIKIWIDB_BIN" ]; then - echo "pikiwidb bin not found" - exit 1 - fi - cp $PIKIWIDB_BIN src/redis-server - cp ./pikiwidb.conf tests/assets/default.conf -} - - -cleanup - -check_tcl - -setup_build_dir - -setup_pika_bin - -echo "run pikiwidb tests $1" - -if [ "$1" == "all" ]; then - tclsh tests/test_helper.tcl --clients 1 -else - tclsh tests/test_helper.tcl --clients 1 --single unit/$1 -fi - -if [ $? 
-ne 0 ]; then - echo "pikiwidb tests failed" - cleanup - exit 1 -fi - -# You can use './pikiwidb.sh all clean 'to ensure that the -# data can be deleted immediately after the test -if [ "$2" == "clean" ]; then - cleanup -fi \ No newline at end of file diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index 96abac0bf..000000000 --- a/tests/README.md +++ /dev/null @@ -1,4 +0,0 @@ -### PikiwiDB test - -* 在 PikiwiDB 目录下执行 `./pikatests.sh geo` 测试PikiwiDB GEO命令 -* 如果是`unit/type`接口, 例如 SET, 执行 `./pikiwidbtests.sh type/set` 测试PikiwiDB SET命令 \ No newline at end of file diff --git a/tests/assets/encodings.rdb b/tests/assets/encodings.rdb deleted file mode 100644 index 9fd9b705d16220065ee117a1c1c094f40fb122f2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 667 zcmbVKu}UOC5UuKJ7oAzb-~v%NHW5S&dS+bpFfmX#Qw_|N?w&>$M|aur5EcU?;j)7> zGV&XY4SF>ZBR^rkz`zf1tzKQwOdP0r-Cfn)uU@~+^|g&HrPRU;knEK1xGIbhsS?(T zOp!5$Ql%uLiRxVU_K~%gGG1r2V@aAV)EAeQf1$=iXe|~B7q#x40{=g8Nhbr35aH0(af04| z31?FkfXdOIL*v>$`m`ZiW?H~$fQQSK0B})18Q{+2^#ErNo(A|lG8gy_c?x0;Mx(`{ zoa#1QiP|F?FVK2I8DyCB=mk$S8nlC&4|~3w8;|#Ox&QtGwHmXU=HND1)gUq}8+2xM zS?Yc@4yO2OwUpuPICQ~2@KI=m_~m`huJS+FRQ_l1RQDc;oztC1%JaPY56LTRm2LPS*34j0q diff --git a/tests/helpers/bg_complex_data.tcl b/tests/helpers/bg_complex_data.tcl deleted file mode 100644 index dffd7c668..000000000 --- a/tests/helpers/bg_complex_data.tcl +++ /dev/null @@ -1,10 +0,0 @@ -source tests/support/redis.tcl -source tests/support/util.tcl - -proc bg_complex_data {host port db ops} { - set r [redis $host $port] - $r select $db - createComplexDataset $r $ops -} - -bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3] diff --git a/tests/helpers/gen_write_load.tcl b/tests/helpers/gen_write_load.tcl deleted file mode 100644 index 6d1a34516..000000000 --- a/tests/helpers/gen_write_load.tcl +++ /dev/null @@ -1,15 +0,0 @@ -source tests/support/redis.tcl - -proc gen_write_load {host port seconds} { - set start_time [clock seconds] - set r [redis $host $port 1] - $r select 9 - while 1 { - $r set [expr rand()] [expr rand()] - if {[clock seconds]-$start_time > $seconds} { - exit 0 - } - } -} - -gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] diff --git a/tests/helpers/redis_queue.py b/tests/helpers/redis_queue.py deleted file mode 100644 index 9203c2d0d..000000000 --- a/tests/helpers/redis_queue.py +++ /dev/null @@ -1,111 +0,0 @@ -import redis -import sys -import time -import threading -import signal - -START_FLAG = True - - -def enqueue(client: redis.Redis, queue_name: str): - while START_FLAG: - n = client.zcard(queue_name) - if n >= 1000: - time.sleep(0.1) - continue - now_ms = int(time.time() * 1000) - pipeline = client.pipeline(transaction=False) - for i in range(10): - score = now_ms << 5 | i - pipeline.zadd(queue_name, {str(score): score}) - pipeline.execute() - print("enqueue exit") - - -def dequeue(client: redis.Redis, queue_name: str): - loop = 0 - while START_FLAG: - start_time = time.time() - n = client.zcard(queue_name) - if n <= 10: - time.sleep(0.1) - continue - res = client.zremrangebyrank(queue_name, 0, 9) - latency = time.time() - start_time - loop += 1 - if loop % 20 == 0: - print("latency: {}ms".format(int(latency * 1000000)/1000)) - loop = 0 - print("dequeue exit") - - -def compact(client: redis.Redis, queue_name: str): - loop = 0 - while START_FLAG: - time.sleep(1) - loop += 1 - if loop % 60 == 0: - client.execute_command("compactrange", "db0", "zset", queue_name, queue_name) - 
print("compact queue {}".format(queue_name)) - loop = 0 - print("compact exit") - - -def auto_compact(client: redis.Redis): - client.config_set("max-cache-statistic-keys", 10000) - client.config_set("small-compaction-threshold", 10000) - client.config_set("small-compaction-duration-threshold", 10000) - - -def main(): - if len(sys.argv) != 5: - print("Usage: python redis_queue.py $redis_host $port $passwd [compact | auto_compact]") - sys.exit(1) - host = sys.argv[1] - port = int(sys.argv[2]) - passwd = sys.argv[3] - mode = sys.argv[4] - - thread_list = [] - queue_name = "test_queue" - - client_enqueue = redis.Redis(host=host, port=port, password=passwd) - t1 = threading.Thread(target=enqueue, args=(client_enqueue, queue_name)) - thread_list.append(t1) - - client_dequeue = redis.Redis(host=host, port=port, password=passwd) - t2 = threading.Thread(target=dequeue, args=(client_dequeue, queue_name)) - thread_list.append(t2) - - client_compact = redis.Redis(host=host, port=port, password=passwd) - if mode == "compact": - t3 = threading.Thread(target=compact, args=(client_compact, queue_name)) - thread_list.append(t3) - elif mode == "auto_compact": - auto_compact(client_compact) - else: - print("invalid compact mode: {}".format(mode)) - sys.exit(1) - - for t in thread_list: - t.start() - - def signal_handler(signal, frame): - print("revc signal: {}".format(signal)) - global START_FLAG - START_FLAG = False - for t in thread_list: - t.join() - print("exit") - sys.exit(0) - - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGQUIT, signal_handler) - - while True: - time.sleep(60) - - -if __name__ == "__main__": - main() diff --git a/tests/sentinel/run.tcl b/tests/sentinel/run.tcl deleted file mode 100644 index f33029959..000000000 --- a/tests/sentinel/run.tcl +++ /dev/null @@ -1,22 +0,0 @@ -# Sentinel test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -cd tests/sentinel -source ../instances.tcl - -set ::instances_count 5 ; # How many instances we use at max. - -proc main {} { - parse_options - spawn_instance sentinel $::sentinel_base_port $::instances_count - spawn_instance redis $::redis_base_port $::instances_count - run_tests - cleanup -} - -if {[catch main e]} { - puts $::errorInfo - cleanup - exit 1 -} diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl deleted file mode 100644 index a79d0c371..000000000 --- a/tests/sentinel/tests/00-base.tcl +++ /dev/null @@ -1,126 +0,0 @@ -# Check the basic monitoring and failover capabilities. 
- -source "../tests/includes/init-tests.tcl" - -if {$::simulate_error} { - test "This test will fail" { - fail "Simulated error" - } -} - -test "Basic failover works if the master is down" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} - -test "All the other slaves now point to the new master" { - foreach_redis_id id { - if {$id != $master_id && $id != 0} { - wait_for_condition 1000 50 { - [RI $id master_port] == [lindex $addr 1] - } else { - fail "Redis ID $id not configured to replicate with new master" - } - } - } -} - -test "The old master eventually gets reconfigured as a slave" { - wait_for_condition 1000 50 { - [RI 0 master_port] == [lindex $addr 1] - } else { - fail "Old master not reconfigured as slave of new master" - } -} - -test "ODOWN is not possible without N (quorum) Sentinels reports" { - foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum [expr $sentinels+1] - } - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - - # Make sure failover did not happened. - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - restart_instance redis $master_id -} - -test "Failover is not possible without majority agreement" { - foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum $quorum - } - - # Crash majority of sentinels - for {set id 0} {$id < $quorum} {incr id} { - kill_instance sentinel $id - } - - # Kill the current master - kill_instance redis $master_id - - # Make sure failover did not happened. - set addr [S $quorum SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - restart_instance redis $master_id - - # Cleanup: restart Sentinels to monitor the master. - for {set id 0} {$id < $quorum} {incr id} { - restart_instance sentinel $id - } -} - -test "Failover works if we configure for absolute agreement" { - foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum $sentinels - } - - # Wait for Sentinels to monitor the master again - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] info-refresh] < 100000 - } else { - fail "At least one Sentinel is not monitoring the master" - } - } - - kill_instance redis $master_id - - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] - - # Set the min ODOWN agreement back to strict majority. 
- foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum $quorum - } -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} diff --git a/tests/sentinel/tests/01-conf-update.tcl b/tests/sentinel/tests/01-conf-update.tcl deleted file mode 100644 index 4998104d2..000000000 --- a/tests/sentinel/tests/01-conf-update.tcl +++ /dev/null @@ -1,39 +0,0 @@ -# Test Sentinel configuration consistency after partitions heal. - -source "../tests/includes/init-tests.tcl" - -test "We can failover with Sentinel 1 crashed" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - - # Crash Sentinel 1 - kill_instance sentinel 1 - - kill_instance redis $master_id - foreach_sentinel_id id { - if {$id != 1} { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "Sentinel $id did not received failover info" - } - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "After Sentinel 1 is restarted, its config gets updated" { - restart_instance sentinel 1 - wait_for_condition 1000 50 { - [lindex [S 1 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "Restarted Sentinel did not received failover info" - } -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} diff --git a/tests/sentinel/tests/02-slaves-reconf.tcl b/tests/sentinel/tests/02-slaves-reconf.tcl deleted file mode 100644 index fa15d2efd..000000000 --- a/tests/sentinel/tests/02-slaves-reconf.tcl +++ /dev/null @@ -1,84 +0,0 @@ -# Check that slaves are reconfigured at a latter time if they are partitioned. -# -# Here we should test: -# 1) That slaves point to the new master after failover. -# 2) That partitioned slaves point to new master when they are partitioned -# away during failover and return at a latter time. 
- -source "../tests/includes/init-tests.tcl" - -proc 02_test_slaves_replication {} { - uplevel 1 { - test "Check that slaves replicate from current master" { - set master_port [RI $master_id tcp_port] - foreach_redis_id id { - if {$id == $master_id} continue - if {[instance_is_killed redis $id]} continue - wait_for_condition 1000 50 { - ([RI $id master_port] == $master_port) && - ([RI $id master_link_status] eq {up}) - } else { - fail "Redis slave $id is replicating from wrong master" - } - } - } - } -} - -proc 02_crash_and_failover {} { - uplevel 1 { - test "Crash the master and force a failover" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] - } - } -} - -02_test_slaves_replication -02_crash_and_failover -02_test_slaves_replication - -test "Kill a slave instance" { - foreach_redis_id id { - if {$id == $master_id} continue - set killed_slave_id $id - kill_instance redis $id - break - } -} - -02_crash_and_failover -02_test_slaves_replication - -test "Wait for failover to end" { - set inprogress 1 - while {$inprogress} { - set inprogress 0 - foreach_sentinel_id id { - if {[dict exists [S $id SENTINEL MASTER mymaster] failover-state]} { - incr inprogress - } - } - if {$inprogress} {after 100} - } -} - -test "Restart killed slave and test replication of slaves again..." { - restart_instance redis $killed_slave_id -} - -# Now we check if the slave rejoining the partition is reconfigured even -# if the failover finished. -02_test_slaves_replication diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl deleted file mode 100644 index 426596c37..000000000 --- a/tests/sentinel/tests/03-runtime-reconf.tcl +++ /dev/null @@ -1 +0,0 @@ -# Test runtime reconfiguration command SENTINEL SET. diff --git a/tests/sentinel/tests/04-slave-selection.tcl b/tests/sentinel/tests/04-slave-selection.tcl deleted file mode 100644 index 3d2ca6484..000000000 --- a/tests/sentinel/tests/04-slave-selection.tcl +++ /dev/null @@ -1,5 +0,0 @@ -# Test slave selection algorithm. -# -# This unit should test: -# 1) That when there are no suitable slaves no failover is performed. -# 2) That among the available slaves, the one with better offset is picked. 
diff --git a/tests/sentinel/tests/05-manual.tcl b/tests/sentinel/tests/05-manual.tcl deleted file mode 100644 index 1a60d814b..000000000 --- a/tests/sentinel/tests/05-manual.tcl +++ /dev/null @@ -1,44 +0,0 @@ -# Test manual failover - -source "../tests/includes/init-tests.tcl" - -test "Manual failover works" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - S 0 SENTINEL FAILOVER mymaster - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} - -test "All the other slaves now point to the new master" { - foreach_redis_id id { - if {$id != $master_id && $id != 0} { - wait_for_condition 1000 50 { - [RI $id master_port] == [lindex $addr 1] - } else { - fail "Redis ID $id not configured to replicate with new master" - } - } - } -} - -test "The old master eventually gets reconfigured as a slave" { - wait_for_condition 1000 50 { - [RI 0 master_port] == [lindex $addr 1] - } else { - fail "Old master not reconfigured as slave of new master" - } -} - diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl deleted file mode 100644 index c8165dcfa..000000000 --- a/tests/sentinel/tests/includes/init-tests.tcl +++ /dev/null @@ -1,72 +0,0 @@ -# Initialization tests -- most units will start including this. - -test "(init) Restart killed instances" { - foreach type {redis sentinel} { - foreach_${type}_id id { - if {[get_instance_attrib $type $id pid] == -1} { - puts -nonewline "$type/$id " - flush stdout - restart_instance $type $id - } - } - } -} - -test "(init) Remove old master entry from sentinels" { - foreach_sentinel_id id { - catch {S $id SENTINEL REMOVE mymaster} - } -} - -set redis_slaves 4 -test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" { - create_redis_master_slave_cluster [expr {$redis_slaves+1}] -} -set master_id 0 - -test "(init) Sentinels can start monitoring a master" { - set sentinels [llength $::sentinel_instances] - set quorum [expr {$sentinels/2+1}] - foreach_sentinel_id id { - S $id SENTINEL MONITOR mymaster \ - [get_instance_attrib redis $master_id host] \ - [get_instance_attrib redis $master_id port] $quorum - } - foreach_sentinel_id id { - assert {[S $id sentinel master mymaster] ne {}} - S $id SENTINEL SET mymaster down-after-milliseconds 2000 - S $id SENTINEL SET mymaster failover-timeout 20000 - S $id SENTINEL SET mymaster parallel-syncs 10 - } -} - -test "(init) Sentinels can talk with the master" { - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [catch {S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster}] == 0 - } else { - fail "Sentinel $id can't talk with the master." 
- } - } -} - -test "(init) Sentinels are able to auto-discover other sentinels" { - set sentinels [llength $::sentinel_instances] - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1) - } else { - fail "At least some sentinel can't detect some other sentinel" - } - } -} - -test "(init) Sentinels are able to auto-discover slaves" { - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-slaves] == $redis_slaves - } else { - fail "At least some sentinel can't detect some slave" - } - } -} diff --git a/tests/sentinel/tmp/.gitignore b/tests/sentinel/tmp/.gitignore deleted file mode 100644 index f581f73e2..000000000 --- a/tests/sentinel/tmp/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -redis_* -sentinel_* diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl deleted file mode 100644 index 7c7836081..000000000 --- a/tests/support/redis.tcl +++ /dev/null @@ -1,294 +0,0 @@ -# Tcl clinet library - used by test-redis.tcl script for now -# Copyright (C) 2009 Salvatore Sanfilippo -# Released under the BSD license like Redis itself -# -# Example usage: -# -# set r [redis 127.0.0.1 6379] -# $r lpush mylist foo -# $r lpush mylist bar -# $r lrange mylist 0 -1 -# $r close -# -# Non blocking usage example: -# -# proc handlePong {r type reply} { -# puts "PONG $type '$reply'" -# if {$reply ne "PONG"} { -# $r ping [list handlePong] -# } -# } -# -# set r [redis] -# $r blocking 0 -# $r get fo [list handlePong] -# -# vwait forever - -package require Tcl 8.5 -package provide redis 0.1 - -namespace eval redis {} -set ::redis::id 0 -array set ::redis::fd {} -array set ::redis::addr {} -array set ::redis::blocking {} -array set ::redis::deferred {} -array set ::redis::reconnect {} -array set ::redis::callback {} -array set ::redis::state {} ;# State in non-blocking reply reading -array set ::redis::statestack {} ;# Stack of states, for nested mbulks - -proc redis {{server 127.0.0.1} {port 6379} {defer 0}} { - set fd [socket $server $port] - fconfigure $fd -translation binary - set id [incr ::redis::id] - set ::redis::fd($id) $fd - set ::redis::addr($id) [list $server $port] - set ::redis::blocking($id) 1 - set ::redis::deferred($id) $defer - set ::redis::reconnect($id) 0 - ::redis::redis_reset_state $id - interp alias {} ::redis::redisHandle$id {} ::redis::__dispatch__ $id -} - -# This is a wrapper to the actual dispatching procedure that handles -# reconnection if needed. -proc ::redis::__dispatch__ {id method args} { - set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] - if {$errorcode && $::redis::reconnect($id) && $::redis::fd($id) eq {}} { - # Try again if the connection was lost. - # FIXME: we don't re-select the previously selected DB, nor we check - # if we are inside a transaction that needs to be re-issued from - # scratch. - set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] - } - return -code $errorcode $retval -} - -proc ::redis::__dispatch__raw__ {id method argv} { - set fd $::redis::fd($id) - - # Reconnect the link if needed. 
- if {$fd eq {}} { - lassign $::redis::addr($id) host port - set ::redis::fd($id) [socket $host $port] - fconfigure $::redis::fd($id) -translation binary - set fd $::redis::fd($id) - } - - set blocking $::redis::blocking($id) - set deferred $::redis::deferred($id) - if {$blocking == 0} { - if {[llength $argv] == 0} { - error "Please provide a callback in non-blocking mode" - } - set callback [lindex $argv end] - set argv [lrange $argv 0 end-1] - } - if {[info command ::redis::__method__$method] eq {}} { - set cmd "*[expr {[llength $argv]+1}]\r\n" - append cmd "$[string length $method]\r\n$method\r\n" - foreach a $argv { - append cmd "$[string length $a]\r\n$a\r\n" - } - ::redis::redis_write $fd $cmd - if {[catch {flush $fd}]} { - set ::redis::fd($id) {} - return -code error "I/O error reading reply" - } - - if {!$deferred} { - if {$blocking} { - ::redis::redis_read_reply $id $fd - } else { - # Every well formed reply read will pop an element from this - # list and use it as a callback. So pipelining is supported - # in non blocking mode. - lappend ::redis::callback($id) $callback - fileevent $fd readable [list ::redis::redis_readable $fd $id] - } - } - } else { - uplevel 1 [list ::redis::__method__$method $id $fd] $argv - } -} - -proc ::redis::__method__blocking {id fd val} { - set ::redis::blocking($id) $val - fconfigure $fd -blocking $val -} - -proc ::redis::__method__reconnect {id fd val} { - set ::redis::reconnect($id) $val -} - -proc ::redis::__method__read {id fd} { - ::redis::redis_read_reply $id $fd -} - -proc ::redis::__method__write {id fd buf} { - ::redis::redis_write $fd $buf -} - -proc ::redis::__method__flush {id fd} { - flush $fd -} - -proc ::redis::__method__close {id fd} { - catch {close $fd} - catch {unset ::redis::fd($id)} - catch {unset ::redis::addr($id)} - catch {unset ::redis::blocking($id)} - catch {unset ::redis::deferred($id)} - catch {unset ::redis::reconnect($id)} - catch {unset ::redis::state($id)} - catch {unset ::redis::statestack($id)} - catch {unset ::redis::callback($id)} - catch {interp alias {} ::redis::redisHandle$id {}} -} - -proc ::redis::__method__channel {id fd} { - return $fd -} - -proc ::redis::__method__deferred {id fd val} { - set ::redis::deferred($id) $val -} - -proc ::redis::redis_write {fd buf} { - puts -nonewline $fd $buf -} - -proc ::redis::redis_writenl {fd buf} { - redis_write $fd $buf - redis_write $fd "\r\n" - flush $fd -} - -proc ::redis::redis_readnl {fd len} { - set buf [read $fd $len] - read $fd 2 ; # discard CR LF - return $buf -} - -proc ::redis::redis_bulk_read {fd} { - set count [redis_read_line $fd] - if {$count == -1} return {} - set buf [redis_readnl $fd $count] - return $buf -} - -proc ::redis::redis_multi_bulk_read {id fd} { - set count [redis_read_line $fd] - if {$count == -1} return {} - set l {} - set err {} - for {set i 0} {$i < $count} {incr i} { - if {[catch { - lappend l [redis_read_reply $id $fd] - } e] && $err eq {}} { - set err $e - } - } - if {$err ne {}} {return -code error $err} - return $l -} - -proc ::redis::redis_read_line fd { - string trim [gets $fd] -} - -proc ::redis::redis_read_reply {id fd} { - set type [read $fd 1] - switch -exact -- $type { - : - - + {redis_read_line $fd} - - {return -code error [redis_read_line $fd]} - $ {redis_bulk_read $fd} - * {redis_multi_bulk_read $id $fd} - default { - if {$type eq {}} { - set ::redis::fd($id) {} - return -code error "I/O error reading reply" - } - return -code error "Bad protocol, '$type' as reply type byte" - } - } -} - -proc ::redis::redis_reset_state id { 
- set ::redis::state($id) [dict create buf {} mbulk -1 bulk -1 reply {}] - set ::redis::statestack($id) {} -} - -proc ::redis::redis_call_callback {id type reply} { - set cb [lindex $::redis::callback($id) 0] - set ::redis::callback($id) [lrange $::redis::callback($id) 1 end] - uplevel #0 $cb [list ::redis::redisHandle$id $type $reply] - ::redis::redis_reset_state $id -} - -# Read a reply in non-blocking mode. -proc ::redis::redis_readable {fd id} { - if {[eof $fd]} { - redis_call_callback $id eof {} - ::redis::__method__close $id $fd - return - } - if {[dict get $::redis::state($id) bulk] == -1} { - set line [gets $fd] - if {$line eq {}} return ;# No complete line available, return - switch -exact -- [string index $line 0] { - : - - + {redis_call_callback $id reply [string range $line 1 end-1]} - - {redis_call_callback $id err [string range $line 1 end-1]} - $ { - dict set ::redis::state($id) bulk \ - [expr [string range $line 1 end-1]+2] - if {[dict get $::redis::state($id) bulk] == 1} { - # We got a $-1, hack the state to play well with this. - dict set ::redis::state($id) bulk 2 - dict set ::redis::state($id) buf "\r\n" - ::redis::redis_readable $fd $id - } - } - * { - dict set ::redis::state($id) mbulk [string range $line 1 end-1] - # Handle *-1 - if {[dict get $::redis::state($id) mbulk] == -1} { - redis_call_callback $id reply {} - } - } - default { - redis_call_callback $id err \ - "Bad protocol, $type as reply type byte" - } - } - } else { - set totlen [dict get $::redis::state($id) bulk] - set buflen [string length [dict get $::redis::state($id) buf]] - set toread [expr {$totlen-$buflen}] - set data [read $fd $toread] - set nread [string length $data] - dict append ::redis::state($id) buf $data - # Check if we read a complete bulk reply - if {[string length [dict get $::redis::state($id) buf]] == - [dict get $::redis::state($id) bulk]} { - if {[dict get $::redis::state($id) mbulk] == -1} { - redis_call_callback $id reply \ - [string range [dict get $::redis::state($id) buf] 0 end-2] - } else { - dict with ::redis::state($id) { - lappend reply [string range $buf 0 end-2] - incr mbulk -1 - set bulk -1 - } - if {[dict get $::redis::state($id) mbulk] == 0} { - redis_call_callback $id reply \ - [dict get $::redis::state($id) reply] - } - } - } - } -} diff --git a/tests/support/server.tcl b/tests/support/server.tcl deleted file mode 100644 index d6ced093a..000000000 --- a/tests/support/server.tcl +++ /dev/null @@ -1,349 +0,0 @@ -set ::global_overrides {} -set ::tags {} -set ::valgrind_errors {} - -proc start_server_error {config_file error} { - set err {} - append err "Cant' start the Redis server\n" - append err "CONFIGURATION:" - append err [exec cat $config_file] - append err "\nERROR:" - append err [string trim $error] - send_data_packet $::test_server_fd err $err -} - -proc check_valgrind_errors stderr { - set fd [open $stderr] - set buf [read $fd] - close $fd - - if {[regexp -- { at 0x} $buf] || - (![regexp -- {definitely lost: 0 bytes} $buf] && - ![regexp -- {no leaks are possible} $buf])} { - send_data_packet $::test_server_fd err "Valgrind error: $buf\n" - } -} - -proc kill_server config { - # nothing to kill when running against external server - if {$::external} return - - # nevermind if its already dead - if {![is_alive $config]} { return } - set pid [dict get $config pid] - - # check for leaks - if {![dict exists $config "skipleaks"]} { - catch { - if {[string match {*Darwin*} [exec uname -a]]} { - tags {"leaks"} { - test "Check for memory leaks (pid $pid)" { - set output 
{0 leaks} - catch {exec leaks $pid} output - if {[string match {*process does not exist*} $output] || - [string match {*cannot examine*} $output]} { - # In a few tests we kill the server process. - set output "0 leaks" - } - set output - } {*0 leaks*} - } - } - } - } - - # kill server and wait for the process to be totally exited - catch {exec kill $pid} - while {[is_alive $config]} { - incr wait 10 - - if {$wait >= 5000} { - puts "Forcing process $pid to exit..." - catch {exec kill -KILL $pid} - } elseif {$wait % 1000 == 0} { - puts "Waiting for process $pid to exit..." - } - after 10 - } - - # Check valgrind errors if needed - if {$::valgrind} { - check_valgrind_errors [dict get $config stderr] - } - - # Remove this pid from the set of active pids in the test server. - send_data_packet $::test_server_fd server-killed $pid -} - -proc is_alive config { - set pid [dict get $config pid] - if {[catch {exec ps -p $pid} err]} { - return 0 - } else { - return 1 - } -} - -proc ping_server {host port} { - set retval 0 - if {[catch { - set fd [socket $host $port] - fconfigure $fd -translation binary - puts $fd "PING\r\n" - flush $fd - set reply [gets $fd] - if {[string range $reply 0 0] eq {+} || - [string range $reply 0 0] eq {-}} { - set retval 1 - } - close $fd - } e]} { - if {$::verbose} { - puts -nonewline "." - } - } else { - if {$::verbose} { - puts -nonewline "ok" - } - } - return $retval -} - -# Return 1 if the server at the specified addr is reachable by PING, otherwise -# returns 0. Performs a try every 50 milliseconds for the specified number -# of retries. -proc server_is_up {host port retrynum} { - after 10 ;# Use a small delay to make likely a first-try success. - set retval 0 - while {[incr retrynum -1]} { - if {[catch {ping_server $host $port} ping]} { - set ping 0 - } - if {$ping} {return 1} - after 50 - } - return 0 -} - -# doesn't really belong here, but highly coupled to code in start_server -proc tags {tags code} { - set ::tags [concat $::tags $tags] - uplevel 1 $code - set ::tags [lrange $::tags 0 end-[llength $tags]] -} - -proc start_server {options {code undefined}} { - # If we are running against an external server, we just push the - # host/port pair in the stack the first time - if {$::external} { - if {[llength $::servers] == 0} { - set srv {} - dict set srv "host" $::host - dict set srv "port" $::port - set client [redis $::host $::port] - dict set srv "client" $client - # $client select 9 - - # append the server to the stack - lappend ::servers $srv - } - uplevel 1 $code - return - } - - # setup defaults - set baseconfig "default.conf" - set overrides {} - set tags {} - - # parse options - foreach {option value} $options { - switch $option { - "config" { - set baseconfig $value } - "overrides" { - set overrides $value } - "tags" { - set tags $value - set ::tags [concat $::tags $value] } - default { - error "Unknown option $option" } - } - } - - set data [split [exec cat "tests/assets/$baseconfig"] "\n"] - set config {} - foreach line $data { - if {[string length $line] == 0 || [string index $line 0] eq "#"} { - continue - } - - set parts [regexp -all -inline {\S+} $line] - set directive [lindex $parts 0] - - if {[llength $parts] > 1} { - set arguments [lrange $parts 1 end] - - set formatted_args {} - foreach arg $arguments { - lappend formatted_args [string trim $arg "{}"] - } - - dict set config $directive $formatted_args - } else { - dict set config $directive "" - } - } - - # use a different directory every time a server is started - dict set config dir [tmpdir 
server] - - # start every server on a different port - set ::port [find_available_port [expr {$::port+1}]] - dict set config port $::port - - # start every server on a different path - dict set config log-path ./logs$::port/ - dict set config db-path ./db$::port/ - # dict set config dump-path ./dump$::port/ - - # apply overrides from global space and arguments - foreach {directive arguments} [concat $::global_overrides $overrides] { - dict set config $directive $arguments - } - - # write new configuration to temporary file - set config_file [tmpfile redis.conf] - set fp [open $config_file w+] - foreach directive [dict keys $config] { - set value [dict get $config $directive] - if {[string is integer -strict $value]} { - puts $fp "$directive $value" - } else { - puts $fp "$directive $value" - } - } - close $fp - set stdout [format "%s/%s" [dict get $config "dir"] "stdout"] - set stderr [format "%s/%s" [dict get $config "dir"] "stderr"] - - if {$::valgrind} { - set pid [exec valgrind --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &] - } else { - #set pid [exec src/redis-server -c $config_file > $stdout 2> $stderr &] - set pid [exec src/redis-server $config_file > $stdout 2> $stderr &] - } - - puts "Starting ---- " - - # Tell the test server about this new instance. - send_data_packet $::test_server_fd server-spawned $pid - - # check that the server actually started - # ugly but tries to be as fast as possible... - if {$::valgrind} {set retrynum 1000} else {set retrynum 100} - - if {$::verbose} { - puts -nonewline "=== ($tags) Starting server ${::host}:${::port} " - } - - if {$code ne "undefined"} { - set serverisup [server_is_up $::host $::port $retrynum] - } else { - set serverisup 1 - } - - if {$::verbose} { - puts "" - } - - if {!$serverisup} { - set err {} - append err [exec cat $stdout] "\n" [exec cat $stderr] - start_server_error $config_file $err - return - } - - puts "Before Wait" - # Wait for actual startup - #while {![info exists _pid]} { - # regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid - # after 100 - #} - puts "After Wait" - - # setup properties to be able to initialize a client object - set host $::host - set port $::port - if {[dict exists $config bind]} { set host [dict get $config bind] } - if {[dict exists $config port]} { set port [dict get $config port] } - - # setup config dict - dict set srv "config_file" $config_file - dict set srv "config" $config - dict set srv "pid" $pid - dict set srv "host" $host - dict set srv "port" $port - dict set srv "stdout" $stdout - dict set srv "stderr" $stderr - - # if a block of code is supplied, we wait for the server to become - # available, create a client object and kill the server afterwards - if {$code ne "undefined"} { - set line [exec head -n1 $stdout] - if {[string match {*already in use*} $line]} { - error_and_quit $config_file $line - } - - # while 1 { - # # check that the server actually started and is ready for connections - # if {[exec grep "going to start" | wc -l < $stderr] > 0} { - # break - # } - # puts "Fuck YYB" - # after 10 - #} - - # append the server to the stack - lappend ::servers $srv - - # connect client (after server dict is put on the stack) - reconnect - - # execute provided block - set num_tests $::num_tests - if {[catch { uplevel 1 $code } error]} { - set backtrace $::errorInfo - - # Kill the server without checking for leaks - dict set srv "skipleaks" 1 - kill_server $srv - - # Print warnings from log - 
puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]] - set warnings [warnings_from_file [dict get $srv "stdout"]] - if {[string length $warnings] > 0} { - puts "$warnings" - } else { - puts "(none)" - } - puts "" - - error $error $backtrace - } - - # Don't do the leak check when no tests were run - if {$num_tests == $::num_tests} { - dict set srv "skipleaks" 1 - } - - # pop the server object - set ::servers [lrange $::servers 0 end-1] - - set ::tags [lrange $::tags 0 end-[llength $tags]] - kill_server $srv - } else { - set ::tags [lrange $::tags 0 end-[llength $tags]] - set _ $srv - } -} diff --git a/tests/support/test.tcl b/tests/support/test.tcl deleted file mode 100644 index d9b81f2d9..000000000 --- a/tests/support/test.tcl +++ /dev/null @@ -1,153 +0,0 @@ -set ::num_tests 0 -set ::num_passed 0 -set ::num_failed 0 -set ::tests_failed {} - -proc fail {msg} { - error "assertion:$msg" -} - -proc assert {condition} { - if {![uplevel 1 [list expr $condition]]} { - error "assertion:Expected condition '$condition' to be true ([uplevel 1 [list subst -nocommands $condition]])" - } -} - -proc assert_no_match {pattern value} { - if {[string match $pattern $value]} { - set context "(context: [info frame -1])" - error "assertion:Expected '$value' to not match '$pattern' $context" - } -} - -proc assert_failed {expected_err detail} { - if {$detail ne ""} { - set detail "(detail: $detail)" - } else { - set detail "(context: [info frame -2])" - } - error "assertion:$expected_err $detail" -} - -proc assert_not_equal {value expected {detail ""}} { - if {!($expected ne $value)} { - assert_failed "Expected '$value' not equal to '$expected'" $detail - } -} - -proc assert_match {pattern value} { - if {![string match $pattern $value]} { - error "assertion:Expected '$value' to match '$pattern'" - } -} - -proc assert_equal {expected value} { - if {$expected ne $value} { - error "assertion:Expected '$value' to be equal to '$expected'" - } -} - -proc assert_error {pattern code} { - if {[catch {uplevel 1 $code} error]} { - assert_match $pattern $error - } else { - error "assertion:Expected an error but nothing was caught" - } -} - -proc assert_encoding {enc key} { - # Swapped out values don't have an encoding, so make sure that - # the value is swapped in before checking the encoding. - set dbg [r debug object $key] - while {[string match "* swapped at:*" $dbg]} { - r debug swapin $key - set dbg [r debug object $key] - } - assert_match "* encoding:$enc *" $dbg -} - -proc assert_type {type key} { - assert_equal $type [r type $key] -} - -# Wait for the specified condition to be true, with the specified number of -# max retries and delay between retries. Otherwise the 'elsescript' is -# executed. 
-proc wait_for_condition {maxtries delay e _else_ elsescript} { - while {[incr maxtries -1] >= 0} { - set errcode [catch {uplevel 1 [list expr $e]} result] - if {$errcode == 0} { - if {$result} break - } else { - return -code $errcode $result - } - after $delay - } - if {$maxtries == -1} { - set errcode [catch [uplevel 1 $elsescript] result] - return -code $errcode $result - } -} - -proc test {name code {okpattern undefined}} { - # abort if tagged with a tag to deny - foreach tag $::denytags { - if {[lsearch $::tags $tag] >= 0} { - return - } - } - - # check if tagged with at least 1 tag to allow when there *is* a list - # of tags to allow, because default policy is to run everything - if {[llength $::allowtags] > 0} { - set matched 0 - foreach tag $::allowtags { - if {[lsearch $::tags $tag] >= 0} { - incr matched - } - } - if {$matched < 1} { - return - } - } - - incr ::num_tests - set details {} - lappend details "$name in $::curfile" - - send_data_packet $::test_server_fd testing $name - - if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { - set msg [string range $error 10 end] - lappend details $msg - lappend ::tests_failed $details - - incr ::num_failed - send_data_packet $::test_server_fd err [join $details "\n"] - } else { - # Re-raise, let handler up the stack take care of this. - error $error $::errorInfo - } - } else { - if {$okpattern eq "undefined" || $okpattern eq $retval || [string match $okpattern $retval]} { - incr ::num_passed - send_data_packet $::test_server_fd ok $name - } else { - set msg "Expected '$okpattern' to equal or match '$retval'" - lappend details $msg - lappend ::tests_failed $details - - incr ::num_failed - send_data_packet $::test_server_fd err [join $details "\n"] - } - } - - if {$::traceleaks} { - set output [exec leaks redis-server] - if {![string match {*0 leaks*} $output]} { - send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" - } - } -} - diff --git a/tests/support/tmpfile.tcl b/tests/support/tmpfile.tcl deleted file mode 100644 index 809f58730..000000000 --- a/tests/support/tmpfile.tcl +++ /dev/null @@ -1,15 +0,0 @@ -set ::tmpcounter 0 -set ::tmproot "./tests/tmp" -file mkdir $::tmproot - -# returns a dirname unique to this process to write to -proc tmpdir {basename} { - set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]] - file mkdir $dir - set _ $dir -} - -# return a filename unique to this process to write to -proc tmpfile {basename} { - file join $::tmproot $basename.[pid].[incr ::tmpcounter] -} diff --git a/tests/support/util.tcl b/tests/support/util.tcl deleted file mode 100644 index cc628784a..000000000 --- a/tests/support/util.tcl +++ /dev/null @@ -1,400 +0,0 @@ -proc randstring {min max {type binary}} { - set len [expr {$min+int(rand()*($max-$min+1))}] - set output {} - if {$type eq {binary}} { - set minval 0 - set maxval 255 - } elseif {$type eq {alpha}} { - set minval 48 - set maxval 122 - } elseif {$type eq {compr}} { - set minval 48 - set maxval 52 - } - while {$len} { - append output [format "%c" [expr {$minval+int(rand()*($maxval-$minval+1))}]] - incr len -1 - } - return $output -} - -# Useful for some test -proc zlistAlikeSort {a b} { - if {[lindex $a 0] > [lindex $b 0]} {return 1} - if {[lindex $a 0] < [lindex $b 0]} {return -1} - string compare [lindex $a 1] [lindex $b 1] -} - -# Return all log lines starting with the first line that contains a warning. -# Generally, this will be an assertion error with a stack trace. 
-proc warnings_from_file {filename} { - set lines [split [exec cat $filename] "\n"] - set matched 0 - set logall 0 - set result {} - foreach line $lines { - if {[string match {*REDIS BUG REPORT START*} $line]} { - set logall 1 - } - if {[regexp {^\[\d+\]\s+\d+\s+\w+\s+\d{2}:\d{2}:\d{2} \#} $line]} { - set matched 1 - } - if {$logall || $matched} { - lappend result $line - } - } - join $result "\n" -} - -# Return value for INFO property -proc status {r property} { - if {[regexp "\r\n$property:(.*?)\r\n" [{*}$r info] _ value]} { - set _ $value - } -} - -proc waitForBgsave r { - while 1 { - if {[status r rdb_bgsave_in_progress] eq 1} { - if {$::verbose} { - puts -nonewline "\nWaiting for background save to finish... " - flush stdout - } - after 1000 - } else { - break - } - } -} - -proc waitForBgrewriteaof r { - while 1 { - if {[status r aof_rewrite_in_progress] eq 1} { - if {$::verbose} { - puts -nonewline "\nWaiting for background AOF rewrite to finish... " - flush stdout - } - after 1000 - } else { - break - } - } -} - -proc wait_for_sync r { - while 1 { - if {[status $r master_link_status] eq "down"} { - after 10 - } else { - break - } - } -} - -# Random integer between 0 and max (excluded). -proc randomInt {max} { - expr {int(rand()*$max)} -} - -# Random signed integer between -max and max (both extremes excluded). -proc randomSignedInt {max} { - set i [randomInt $max] - if {rand() > 0.5} { - set i -$i - } - return $i -} - -proc randpath args { - set path [expr {int(rand()*[llength $args])}] - uplevel 1 [lindex $args $path] -} - -proc randomValue {} { - randpath { - # Small enough to likely collide - randomSignedInt 1000 - } { - # 32 bit compressible signed/unsigned - randpath {randomSignedInt 2000000000} {randomSignedInt 4000000000} - } { - # 64 bit - randpath {randomSignedInt 1000000000000} - } { - # Random string - randpath {randstring 0 256 alpha} \ - {randstring 0 256 compr} \ - {randstring 0 256 binary} - } -} - -proc randomKey {} { - randpath { - # Small enough to likely collide - randomInt 1000 - } { - # 32 bit compressible signed/unsigned - randpath {randomInt 2000000000} {randomInt 4000000000} - } { - # 64 bit - randpath {randomInt 1000000000000} - } { - # Random string - randpath {randstring 1 256 alpha} \ - {randstring 1 256 compr} - } -} - -proc findKeyWithType {r type} { - for {set j 0} {$j < 20} {incr j} { - set k [{*}$r randomkey] - if {$k eq {}} { - return {} - } - if {[{*}$r type $k] eq $type} { - return $k - } - } - return {} -} - -proc createComplexDataset {r ops {opt {}}} { - for {set j 0} {$j < $ops} {incr j} { - set k [randomKey] - set k2 [randomKey] - set f [randomValue] - set v [randomValue] - - if {[lsearch -exact $opt useexpire] != -1} { - if {rand() < 0.1} { - {*}$r expire [randomKey] [randomInt 2] - } - } - - randpath { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - randpath {set d +inf} {set d -inf} - } - set t [{*}$r type $k] - - if {$t eq {none}} { - randpath { - {*}$r set $k $v - } { - {*}$r lpush $k $v - } { - {*}$r sadd $k $v - } { - {*}$r zadd $k $d $v - } { - {*}$r hset $k $f $v - } { - {*}$r del $k - } - set t [{*}$r type $k] - } - - switch $t { - {string} { - # Nothing to do - } - {list} { - randpath {{*}$r lpush $k $v} \ - {{*}$r rpush $k $v} \ - {{*}$r lrem $k 0 $v} \ - {{*}$r rpop $k} \ - {{*}$r lpop $k} - } - {set} { - randpath {{*}$r sadd $k $v} \ - {{*}$r srem $k $v} \ - { - set otherset [findKeyWithType {*}$r set] - if {$otherset ne {}} { 
- randpath { - {*}$r sunionstore $k2 $k $otherset - } { - {*}$r sinterstore $k2 $k $otherset - } { - {*}$r sdiffstore $k2 $k $otherset - } - } - } - } - {zset} { - randpath {{*}$r zadd $k $d $v} \ - {{*}$r zrem $k $v} \ - { - set otherzset [findKeyWithType {*}$r zset] - if {$otherzset ne {}} { - randpath { - {*}$r zunionstore $k2 2 $k $otherzset - } { - {*}$r zinterstore $k2 2 $k $otherzset - } - } - } - } - {hash} { - randpath {{*}$r hset $k $f $v} \ - {{*}$r hdel $k $f} - } - } - } -} - -proc formatCommand {args} { - set cmd "*[llength $args]\r\n" - foreach a $args { - append cmd "$[string length $a]\r\n$a\r\n" - } - set _ $cmd -} - -proc csvdump r { - set o {} - foreach k [lsort [{*}$r keys *]] { - set type [{*}$r type $k] - append o [csvstring $k] , [csvstring $type] , - switch $type { - string { - append o [csvstring [{*}$r get $k]] "\n" - } - list { - foreach e [{*}$r lrange $k 0 -1] { - append o [csvstring $e] , - } - append o "\n" - } - set { - foreach e [lsort [{*}$r smembers $k]] { - append o [csvstring $e] , - } - append o "\n" - } - zset { - foreach e [{*}$r zrange $k 0 -1 withscores] { - append o [csvstring $e] , - } - append o "\n" - } - hash { - set fields [{*}$r hgetall $k] - set newfields {} - foreach {k v} $fields { - lappend newfields [list $k $v] - } - set fields [lsort -index 0 $newfields] - foreach kv $fields { - append o [csvstring [lindex $kv 0]] , - append o [csvstring [lindex $kv 1]] , - } - append o "\n" - } - } - } - return $o -} - -proc csvstring s { - return "\"$s\"" -} - -proc roundFloat f { - format "%.10g" $f -} - -proc find_available_port start { - for {set j $start} {$j < $start+1024} {incr j} { - if {[catch { - set fd [socket 127.0.0.1 $j] - }]} { - return $j - } else { - close $fd - } - } - if {$j == $start+1024} { - error "Can't find a non busy port in the $start-[expr {$start+1023}] range." - } -} - -# Test if TERM looks like to support colors -proc color_term {} { - expr {[info exists ::env(TERM)] && [string match *xterm* $::env(TERM)]} -} - -proc colorstr {color str} { - if {[color_term]} { - set b 0 - if {[string range $color 0 4] eq {bold-}} { - set b 1 - set color [string range $color 5 end] - } - switch $color { - red {set colorcode {31}} - green {set colorcode {32}} - yellow {set colorcode {33}} - blue {set colorcode {34}} - magenta {set colorcode {35}} - cyan {set colorcode {36}} - white {set colorcode {37}} - default {set colorcode {37}} - } - if {$colorcode ne {}} { - return "\033\[$b;${colorcode};49m$str\033\[0m" - } - } else { - return $str - } -} - -# Execute a background process writing random data for the specified number -# of seconds to the specified Redis instance. -proc start_write_load {host port seconds} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/gen_write_load.tcl $host $port $seconds & -} - -# Stop a process generating write load executed with start_write_load. -proc stop_write_load {handle} { - catch {exec /bin/kill -9 $handle} -} - -# Mock debug populate -proc populate {size} { - for {set counter 0} {$counter < $size} {incr counter} { - r set "key:$counter" "key:$counter" - } -} - -proc wait_for_blocked_client {{idx 0}} { - wait_for_condition 50 100 { - [s $idx blocked_clients] ne 0 - } else { - fail "no blocked clients" - } -} - -# Shuffle a list with Fisher-Yates algorithm. 
-proc lshuffle {list} { - set n [llength $list] - while {$n>1} { - set j [expr {int(rand()*$n)}] - incr n -1 - if {$n==$j} continue - set v [lindex $list $j] - lset list $j [lindex $list $n] - lset list $n $v - } - return $list -} \ No newline at end of file diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl deleted file mode 100644 index 0665583b0..000000000 --- a/tests/test_helper.tcl +++ /dev/null @@ -1,554 +0,0 @@ -# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -package require Tcl 8.5 - -set tcl_precision 17 -source tests/support/redis.tcl -source tests/support/server.tcl -source tests/support/tmpfile.tcl -source tests/support/test.tcl -source tests/support/util.tcl - -set ::all_tests { - # unit/printver - # unit/basic - # unit/scan - # unit/multi - # unit/quit - # unit/type/list - # unit/pubsub - # unit/slowlog - # unit/maxmemory - # unit/bitops - # unit/hyperloglog - # unit/type - # unit/acl - # unit/type/list-2 - # unit/type/list-3 - # unit/type/set - # unit/type/zset - # unit/type/string - unit/type/hash - # unit/expire - # unit/protocol - # unit/other - # unit/auth - # unit/sort - # unit/aofrw - # unit/scripting - # unit/introspection - # unit/limits - # unit/obuf-limits - # unit/dump - # unit/memefficiency - # unit/command - # unit/tcl/replication - # unit/tcl/replication-2 - # unit/tcl/replication-3 - # unit/tcl/replication-4 - # unit/tcl/replication-psync - # unit/tcl/aof - # unit/tcl/rdb - # unit/tcl/convert-zipmap-hash-on-load -} - -# because the comment not works in tcl list, use regsub to ignore the item starting with '#' -regsub -all {#.*?\n} $::all_tests {} ::all_tests - - -# Index to the next test to run in the ::all_tests list. -set ::next_test 0 - -set ::host 127.0.0.1 -set ::port 21111 -set ::traceleaks 0 -set ::valgrind 0 -set ::verbose 0 -set ::quiet 0 -set ::denytags {} -set ::allowtags {} -set ::external 0; # If "1" this means, we are running against external instance -set ::file ""; # If set, runs only the tests in this comma separated list -set ::curfile ""; # Hold the filename of the current suite -set ::accurate 0; # If true runs fuzz tests with more iterations -set ::force_failure 0 -set ::timeout 600; # 10 minutes without progresses will quit the test. -set ::last_progress [clock seconds] -set ::active_servers {} ; # Pids of active Redis instances. - -# Set to 1 when we are running in client mode. The Redis test uses a -# server-client model to run tests simultaneously. The server instance -# runs the specified number of client instances that will actually run tests. -# The server is responsible of showing the result to the user, and exit with -# the appropriate exit code depending on the test outcome. -set ::client 0 -set ::numclients 16 - -proc execute_tests name { - set path "tests/$name.tcl" - set ::curfile $path - source $path - send_data_packet $::test_server_fd done "$name" -} - -# Setup a list to hold a stack of server configs. When calls to start_server -# are nested, use "srv 0 pid" to get the pid of the inner server. To access -# outer servers, use "srv -1 pid" etcetera. -set ::servers {} -proc srv {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set property [lindex $args 1] - } else { - set property [lindex $args 0] - } - set srv [lindex $::servers end+$level] - dict get $srv $property -} - -# Provide easy access to the client for the inner server. 
It's possible to -# prepend the argument list with a negative level to access clients for -# servers running in outer blocks. -proc r {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - [srv $level "client"] {*}$args -} - -proc reconnect {args} { - set level [lindex $args 0] - if {[string length $level] == 0 || ![string is integer $level]} { - set level 0 - } - - set srv [lindex $::servers end+$level] - set host [dict get $srv "host"] - set port [dict get $srv "port"] - set config [dict get $srv "config"] - set client [redis $host $port] - dict set srv "client" $client - - # select the right db when we don't have to authenticate - if {![dict exists $config "requirepass"]} { - # $client select 9 - } - - # re-set $srv in the servers list - lset ::servers end+$level $srv -} - -proc redis_deferring_client {args} { - set level 0 - if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - - # create client that defers reading reply - set client [redis [srv $level "host"] [srv $level "port"] 1] - - # select the right db and read the response (OK) - $client select 0 - $client read - return $client -} - -# Provide easy access to INFO properties. Same semantic as "proc r". -proc s {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - status [srv $level "client"] [lindex $args 0] -} - -proc cleanup {} { - if {!$::quiet} {puts -nonewline "Cleanup: may take some time... "} - flush stdout - catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]} - catch {exec rm -rf {*}[glob tests/tmp/server.*]} - if {!$::quiet} {puts "OK"} -} - -proc test_server_main {} { - cleanup - set tclsh [info nameofexecutable] - # Open a listening socket, trying different ports in order to find a - # non busy one. - set port [find_available_port 11111] - if {!$::quiet} { - puts "Starting test server at port $port" - } - socket -server accept_test_clients -myaddr 127.0.0.1 $port - - # Start the client instances - set ::clients_pids {} - set start_port [expr {$::port+100}] - for {set j 0} {$j < $::numclients} {incr j} { - set start_port [find_available_port $start_port] - set p [exec $tclsh [info script] {*}$::argv \ - --client $port --port $start_port &] - lappend ::clients_pids $p - incr start_port 10 - } - - # Setup global state for the test server - set ::idle_clients {} - set ::active_clients {} - array set ::active_clients_task {} - array set ::clients_start_time {} - set ::clients_time_history {} - set ::failed_tests {} - - # Enter the event loop to handle clients I/O - after 100 test_server_cron - vwait forever -} - -# This function gets called 10 times per second. -proc test_server_cron {} { - set elapsed [expr {[clock seconds]-$::last_progress}] - - if {$elapsed > $::timeout} { - set err "\[[colorstr red TIMEOUT]\]: clients state report follows." - puts $err - show_clients_state - kill_clients - force_kill_all_servers - the_end - } - - after 100 test_server_cron -} - -proc accept_test_clients {fd addr port} { - fconfigure $fd -encoding binary - fileevent $fd readable [list read_from_test_client $fd] -} - -# This is the readable handler of our test server. Clients send us messages -# in the form of a status code such and additional data. Supported -# status types are: -# -# ready: the client is ready to execute the command. Only sent at client -# startup. 
The server will queue the client FD in the list of idle -# clients. -# testing: just used to signal that a given test started. -# ok: a test was executed with success. -# err: a test was executed with an error. -# exception: there was a runtime exception while executing the test. -# done: all the specified test file was processed, this test client is -# ready to accept a new task. -proc read_from_test_client fd { - set bytes [gets $fd] - set payload [read $fd $bytes] - foreach {status data} $payload break - set ::last_progress [clock seconds] - - if {$status eq {ready}} { - if {!$::quiet} { - puts "\[$status\]: $data" - } - signal_idle_client $fd - } elseif {$status eq {done}} { - set elapsed [expr {[clock seconds]-$::clients_start_time($fd)}] - set all_tests_count [llength $::all_tests] - set running_tests_count [expr {[llength $::active_clients]-1}] - set completed_tests_count [expr {$::next_test-$running_tests_count}] - puts "\[$completed_tests_count/$all_tests_count [colorstr yellow $status]\]: $data ($elapsed seconds)" - lappend ::clients_time_history $elapsed $data - signal_idle_client $fd - set ::active_clients_task($fd) DONE - } elseif {$status eq {ok}} { - if {!$::quiet} { - puts "\[[colorstr green $status]\]: $data" - } - set ::active_clients_task($fd) "(OK) $data" - } elseif {$status eq {err}} { - set err "\[[colorstr red $status]\]: $data" - puts $err - lappend ::failed_tests $err - set ::active_clients_task($fd) "(ERR) $data" - } elseif {$status eq {exception}} { - puts "\[[colorstr red $status]\]: $data" - kill_clients - force_kill_all_servers - exit 1 - } elseif {$status eq {testing}} { - set ::active_clients_task($fd) "(IN PROGRESS) $data" - } elseif {$status eq {server-spawned}} { - lappend ::active_servers $data - } elseif {$status eq {server-killed}} { - set ::active_servers [lsearch -all -inline -not -exact $::active_servers $data] - } else { - if {!$::quiet} { - puts "\[$status\]: $data" - } - } -} - -proc show_clients_state {} { - # The following loop is only useful for debugging tests that may - # enter an infinite loop. Commented out normally. - foreach x $::active_clients { - if {[info exist ::active_clients_task($x)]} { - puts "$x => $::active_clients_task($x)" - } else { - puts "$x => ???" - } - } -} - -proc kill_clients {} { - foreach p $::clients_pids { - catch {exec kill $p} - } -} - -proc force_kill_all_servers {} { - foreach p $::active_servers { - puts "Killing still running Redis server $p" - catch {exec kill -9 $p} - } -} - -# A new client is idle. Remove it from the list of active clients and -# if there are still test units to run, launch them. -proc signal_idle_client fd { - # Remove this fd from the list of active clients. - set ::active_clients \ - [lsearch -all -inline -not -exact $::active_clients $fd] - - if 0 {show_clients_state} - - # New unit to process? - if {$::next_test != [llength $::all_tests]} { - if {!$::quiet} { - puts [colorstr bold-white "Testing [lindex $::all_tests $::next_test]"] - set ::active_clients_task($fd) "ASSIGNED: $fd ([lindex $::all_tests $::next_test])" - } - set ::clients_start_time($fd) [clock seconds] - send_data_packet $fd run [lindex $::all_tests $::next_test] - lappend ::active_clients $fd - incr ::next_test - } else { - lappend ::idle_clients $fd - if {[llength $::active_clients] == 0} { - the_end - } - } -} - -# The the_end function gets called when all the test units were already -# executed, so the test finished. -proc the_end {} { - # TODO: print the status, exit with the rigth exit code. 
- puts "\n The End\n" - puts "Execution time of different units:" - foreach {time name} $::clients_time_history { - puts " $time seconds - $name" - } - if {[llength $::failed_tests]} { - puts "\n[colorstr bold-red {!!! WARNING}] The following tests failed:\n" - foreach failed $::failed_tests { - puts "*** $failed" - } - cleanup - exit 1 - } else { - puts "\n[colorstr bold-white {\o/}] [colorstr bold-green {All tests passed without errors!}]\n" - cleanup - exit 0 - } -} - -# The client is not even driven (the test server is instead) as we just need -# to read the command, execute, reply... all this in a loop. -proc test_client_main server_port { - set ::test_server_fd [socket localhost $server_port] - fconfigure $::test_server_fd -encoding binary - send_data_packet $::test_server_fd ready [pid] - while 1 { - set bytes [gets $::test_server_fd] - set payload [read $::test_server_fd $bytes] - foreach {cmd data} $payload break - if {$cmd eq {run}} { - execute_tests $data - } else { - error "Unknown test client command: $cmd" - } - } -} - -proc send_data_packet {fd status data} { - set payload [list $status $data] - puts $fd [string length $payload] - puts -nonewline $fd $payload - flush $fd -} - -proc print_help_screen {} { - puts [join { - "--valgrind Run the test over valgrind." - "--accurate Run slow randomized tests for more iterations." - "--quiet Don't show individual tests." - "--single Just execute the specified unit (see next option)." - "--list-tests List all the available test units." - "--clients Number of test clients (default 16)." - "--timeout Test timeout in seconds (default 10 min)." - "--force-failure Force the execution of a test that always fails." - "--help Print this help screen." - } "\n"] -} - -# parse arguments -for {set j 0} {$j < [llength $argv]} {incr j} { - set opt [lindex $argv $j] - set arg [lindex $argv [expr $j+1]] - if {$opt eq {--tags}} { - foreach tag $arg { - if {[string index $tag 0] eq "-"} { - lappend ::denytags [string range $tag 1 end] - } else { - lappend ::allowtags $tag - } - } - incr j - } elseif {$opt eq {--valgrind}} { - set ::valgrind 1 - } elseif {$opt eq {--quiet}} { - set ::quiet 1 - } elseif {$opt eq {--host}} { - set ::external 1 - set ::host $arg - incr j - } elseif {$opt eq {--port}} { - set ::port $arg - incr j - } elseif {$opt eq {--accurate}} { - set ::accurate 1 - } elseif {$opt eq {--force-failure}} { - set ::force_failure 1 - } elseif {$opt eq {--single}} { - set ::all_tests $arg - incr j - } elseif {$opt eq {--list-tests}} { - foreach t $::all_tests { - puts $t - } - exit 0 - } elseif {$opt eq {--client}} { - set ::client 1 - set ::test_server_port $arg - incr j - } elseif {$opt eq {--clients}} { - set ::numclients $arg - incr j - } elseif {$opt eq {--timeout}} { - set ::timeout $arg - incr j - } elseif {$opt eq {--help}} { - print_help_screen - exit 0 - } else { - puts "Wrong argument: $opt" - exit 1 - } -} - -proc attach_to_replication_stream {} { - set s [socket [srv 0 "host"] [srv 0 "port"]] - fconfigure $s -translation binary - puts -nonewline $s "SYNC\r\n" - flush $s - - # Get the count - set count [gets $s] - set prefix [string range $count 0 0] - if {$prefix ne {$}} { - error "attach_to_replication_stream error. Received '$count' as count." 
- } - set count [string range $count 1 end] - - # Consume the bulk payload - while {$count} { - set buf [read $s $count] - set count [expr {$count-[string length $buf]}] - } - return $s -} - -proc read_from_replication_stream {s} { - fconfigure $s -blocking 0 - set attempt 0 - while {[gets $s count] == -1} { - if {[incr attempt] == 10} return "" - after 100 - } - fconfigure $s -blocking 1 - set count [string range $count 1 end] - - # Return a list of arguments for the command. - set res {} - for {set j 0} {$j < $count} {incr j} { - read $s 1 - set arg [::redis::redis_bulk_read $s] - if {$j == 0} {set arg [string tolower $arg]} - lappend res $arg - } - return $res -} - -proc assert_replication_stream {s patterns} { - for {set j 0} {$j < [llength $patterns]} {incr j} { - assert_match [lindex $patterns $j] [read_from_replication_stream $s] - } -} - -proc close_replication_stream {s} { - close $s -} - -# With the parallel test running multiple Redis instances at the same time -# we need a fast enough computer, otherwise a lot of tests may generate -# false positives. -# If the computer is too slow we revert the sequential test without any -# parallelism, that is, clients == 1. -proc is_a_slow_computer {} { - set start [clock milliseconds] - for {set j 0} {$j < 1000000} {incr j} {} - set elapsed [expr [clock milliseconds]-$start] - expr {$elapsed > 200} -} - -if {$::client} { - if {[catch { test_client_main $::test_server_port } err]} { - set estr "Executing test client: $err.\n$::errorInfo" - if {[catch {send_data_packet $::test_server_fd exception $estr}]} { - puts $estr - } - exit 1 - } -} else { - if {[is_a_slow_computer]} { - puts "** SLOW COMPUTER ** Using a single client to avoid false positives." - set ::numclients 1 - } - - if {[catch { test_server_main } err]} { - if {[string length $err] > 0} { - # only display error when not generated by the test suite - if {$err ne "exception"} { - puts $::errorInfo - } - exit 1 - } - } -} diff --git a/tests/unit/Blpop_Brpop_test.py b/tests/unit/Blpop_Brpop_test.py deleted file mode 100644 index 664673276..000000000 --- a/tests/unit/Blpop_Brpop_test.py +++ /dev/null @@ -1,896 +0,0 @@ -import threading -import time -import redis -import random -import string - - - - -# 单个list不阻塞时的出列顺序测试(行为应当和lpop/rpop一样) -def test_single_existing_list(db_): - print("start test_single_existing_list, db:db%d" % (db_)) - # 创建Redis客户端 - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - - # 清空测试环境 - pika.delete('blist') - - # 向列表a中插入元素 - pika.lpush('blist', 'a', 'b', 'large', 'c', 'd') - # 此时blist1顺序为: d c large b a - - result = pika.blpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == b'd', f"Expected (b'blist1', b'd'), but got {result}" - result = pika.brpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == b'a', f"Expected (b'blist1', b'a'), but got {result}" - - result = pika.blpop("blist", timeout=0) - assert result[0] == b'blist' and result[1] == b'c', f"Expected (b'blist1', b'c'), but got {result}" - result = pika.brpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == b'b', f"Expected (b'blist1', b'b'), but got {result}" - - pika.close() - print("test_single_existing_list Passed [Passed], db:db%d" % (db_)) - - -# 解阻塞测试(超时自动解阻塞,lpush解阻塞,rpush解阻塞,rpoplpush解阻塞) -def test_blpop_brpop_unblock_lrpush_rpoplpush(db_): - print("start test_blpop_brpop_unblock_lrpush_rpoplpush, db:db%d" % (db_)) - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - - # 
超时自动解阻塞测试(blpop) - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - - def blpop_thread1(timeout_): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop('blist', timeout=timeout_) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=blpop_thread1, args=(1,)) - thread.start() - time.sleep(2) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # 超时自动解阻塞测试(brpop) - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - - def brpop_thread2(timeout_): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop('blist', timeout=timeout_) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=brpop_thread2, args=(2,)) - thread.start() - time.sleep(3) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # lpush解brpop阻塞 - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - - def brpop_thread3(): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop('blist', timeout=0) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=brpop_thread3) - thread.start() - time.sleep(1) - pika.lpush('blist', 'foo') - time.sleep(1) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # lpush解blpop阻塞 - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - - def blpop_thread31(): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop('blist', timeout=0) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=blpop_thread31) - thread.start() - time.sleep(1) - pika.lpush('blist', 'foo') - time.sleep(1) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # rpush解blpop阻塞 - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - - def blpop_thread4(): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop('blist', timeout=0) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=blpop_thread4) - thread.start() - time.sleep(1) - pika.rpush('blist', 'foo') - time.sleep(1) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # rpush解brpop阻塞 - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - - def brpop_thread41(): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop('blist', timeout=0) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=brpop_thread41) - thread.start() - time.sleep(1) - pika.rpush('blist', 'foo') - time.sleep(1) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # rpoplpush解blpop阻塞 - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - pika.lpush('blist0', 'v1') - - def blpop_thread5(): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = 
client.blpop('blist', timeout=0) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=blpop_thread5) - thread.start() - time.sleep(1) - pika.rpoplpush('blist0', 'blist') - time.sleep(1) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - - # rpoplpush解brpop阻塞 - blocked = True - blocked_lock = threading.Lock() - pika.delete('blist') - pika.lpush('blist0', 'v1') - - def brpop_thread51(): - nonlocal blocked - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop('blist', timeout=0) - with blocked_lock: - blocked = False - client.close() - - thread = threading.Thread(target=brpop_thread51) - thread.start() - time.sleep(1) - pika.rpoplpush('blist0', 'blist') - time.sleep(1) - with blocked_lock: - assert blocked == False, f"Expected False but got {blocked}" - thread.join() - pika.close() - print("test_blpop_brpop_unblock_lrpush_rpoplpush Passed [Passed], db:db%d" % (db_)) - - -def test_concurrency_block_unblock(db_): - print("start test_concurrency_block_unblock, it will cost some time, pls wait, db:db%d" % (db_)) - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - pika.delete('blist0', 'blist1', 'blist2', 'blist3') - - def blpop_thread(list, timeout_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop(list, timeout=timeout_) - client.close() - - def brpop_thread(list, timeout_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop(list, timeout=timeout_) - client.close() - - def lpush_thread(list_, value_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - client.lpush(list_, value_) - client.close() - - def rpush_thread(list_, value_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - client.rpush(list_, value_) - client.close() - - pika.delete('blist0', 'blist1', 'blist2', 'blist3') - pika.delete('blist100', 'blist101', 'blist102', 'blist103') - - lists = ['blist0', 'blist1', 'blist2', 'blist3'] - # 先增加一些阻塞连接作为干扰 - t_threads = [] - for i in range(0, 25): - t1 = threading.Thread(target=blpop_thread, args=(['blist100', 'blist101', 'blist102', 'blist103'], 30)) - t2 = threading.Thread(target=brpop_thread, args=(['blist100', 'blist101', 'blist102', 'blist103'], 30)) - t1.start() - t2.start() - t_threads.append(t1) - t_threads.append(t2) - - # 并发超时测试 - threads = [] - # 添加100个线程执行blpop/brpop,同时被阻塞,并且应当2s后超时自动解阻塞 - for i in range(0, 50): - t1 = threading.Thread(target=blpop_thread, args=(lists, 2)) - t2 = threading.Thread(target=brpop_thread, args=(lists, 2)) - t1.start() - t2.start() - threads.append(t1) - threads.append(t2) - # 线程结束需要一些时间 - time.sleep(6) - for t in threads: - if t.is_alive(): - assert False, "Error: this thread is still running, means conn didn't got unblocked in time" - else: - pass - # print("conn unblocked, OK") - - # 并发push解阻塞测试 - threads = [] - # 添加100个线程执行blpop/brpop,同时被阻塞 - for i in range(0, 50): - t1 = threading.Thread(target=blpop_thread, args=(lists, 0)) - t2 = threading.Thread(target=brpop_thread, args=(lists, 0)) - t1.start() - t2.start() - threads.append(t1) - threads.append(t2) - # 确保线程都执行了blpop/brpop - time.sleep(5) - - # push 200条数据,确保能解除前面100个conn的阻塞 - for i in range(0, 50): - t1 = threading.Thread(target=lpush_thread, args=('blist2', 'v')) - t2 = threading.Thread(target=rpush_thread, args=('blist0', 'v')) - t3 
= threading.Thread(target=lpush_thread, args=('blist1', 'v')) - t4 = threading.Thread(target=rpush_thread, args=('blist3', 'v')) - t1.start() - t2.start() - t3.start() - t4.start() - # 100个线程结束需要时间 - time.sleep(5) - for t in threads: - if t.is_alive(): - assert False, "Error: this thread is still running, means conn didn't got unblocked in time" - else: - pass - # print("conn unblocked, OK") - - pika.delete('blist0', 'blist1', 'blist2', 'blist3') - - # 混合并发(一半自动解阻塞,一半push解阻塞) - threads = [] - # 添加100个线程执行blpop/brpop,同时被阻塞 - for i in range(0, 25): - t1 = threading.Thread(target=blpop_thread, args=(['blist0', 'blist1'], 3)) - t2 = threading.Thread(target=brpop_thread, args=(['blist0', 'blist1'], 3)) - t3 = threading.Thread(target=blpop_thread, args=(['blist2', 'blist3'], 0)) - t4 = threading.Thread(target=brpop_thread, args=(['blist2', 'blist3'], 0)) - t1.start() - t2.start() - t3.start() - t4.start() - threads.append(t1) - threads.append(t2) - - # 确保blpop/brpop都执行完了,并且其中50个conn马上要开始超时解除阻塞 - time.sleep(3) - - # 并发push 100条数据,确保能解除前面50个conn的阻塞 - for i in range(0, 25): - t1 = threading.Thread(target=lpush_thread, args=('blist2', 'v')) - t2 = threading.Thread(target=rpush_thread, args=('blist3', 'v')) - t3 = threading.Thread(target=lpush_thread, args=('blist2', 'v')) - t4 = threading.Thread(target=rpush_thread, args=('blist3', 'v')) - t1.start() - t2.start() - t3.start() - t4.start() - - # 100个线程结束需要时间 - time.sleep(5) - for t in threads: - if t.is_alive(): - assert False, "Error: this thread is still running, means conn didn't got unblocked in time" - else: - pass - # print("conn unblocked, OK") - - for t in t_threads: - t.join() - pika.delete('blist0', 'blist1', 'blist2', 'blist3') - - print("test_concurrency_block_unblock Passed [Passed], db:db%d" % (db_)) - pika.close() - - -# blpop/brpop多个list不阻塞时,从左到右选择第一个有元素的list进行pop -def test_multiple_existing_lists(db_): - print("start test_multiple_existing_lists, db:db%d" % (db_)) - # 创建Redis客户端 - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - - # 清空测试环境 - pika.delete('blist1', 'large', 'large', 'blist2') - - # 向blist1和blist2列表中插入元素 - pika.rpush('blist1', 'a', "large", 'c') - pika.rpush('blist2', 'd', "large", 'f') - - result = pika.blpop(['blist1', 'blist2'], timeout=1) - assert result[0] == b'blist1' and result[1] == b'a', f"Expected (b'blist1', b'a'), but got {result}" - result = pika.brpop(['blist1', 'blist2'], timeout=1) - assert result[0] == b'blist1' and result[1] == b'c', f"Expected (b'blist1', b'c'), but got {result}" - - result = pika.llen('blist1') - assert result == 1, f"Expected 1, but got {result}" - result = pika.llen('blist2') - assert result == 3, f"Expected 3, but got {result}" - - result = pika.blpop(['blist2', 'blist1'], timeout=1) - assert result[0] == b'blist2' and result[1] == b'd', f"Expected (b'blist2', b'd'), but got {result}" - result = pika.brpop(['blist2', 'blist1'], timeout=1) - assert result[0] == b'blist2' and result[1] == b'f', f"Expected (b'blist2', b'f'), but got {result}" - - result = pika.llen('blist1') - assert result == 1, f"Expected 1, but got {result}" - result = pika.llen('blist2') - assert result == 1, f"Expected 1, but got {result}" - - pika.delete("blist3") - # blist3没有元素,应该从blist1/blist2中弹出元素 - result = pika.blpop(['blist3', 'blist2'], timeout=0) - assert result[0] == b'blist2' and result[1] == b'large', f"Expected (b'blist2', b'large'), but got {result}" - - result = pika.brpop(['blist3', 'blist1'], timeout=0) - assert result[0] == b'blist1' and result[1] == b'large', 
f"Expected (b'blist1', b'large'), but got {result}" - - pika.close() - print("test_multiple_existing_lists Passed [Passed], db:db%d" % (db_)) - - -def test_blpop_brpop_same_key_multiple_times(db_): - print("start test_blpop_brpop_same_key_multiple_times, db:db%d" % (db_)) - # 创建Redis客户端 - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - - # 清空测试环境 - pika.delete('list1', 'list2') - - def blpop_thread1(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list1' and result[1] == b'a', f"Expected (b'list1', b'a'), but got {result}" - client.close() - - thread = threading.Thread(target=blpop_thread1) - thread.start() - # 确保BLPOP已经执行 - time.sleep(0.5) - # 向list1插入元素 - pika.lpush('list1', 'a') - # 等待线程结束 - thread.join() - - def blpop_thread2(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list2' and result[1] == b'b', f"Expected (b'list2', b'b'), but got {result}" - client.close() - - thread = threading.Thread(target=blpop_thread2) - thread.start() - # 确保BLPOP已经执行 - time.sleep(0.5) - # 向list2插入元素 - pika.lpush('list2', 'b') - # 等待线程结束 - thread.join() - - # 提前插入元素 - pika.lpush('list1', 'c') - pika.lpush('list2', 'd') - result = pika.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list1' and result[1] == b'c', f"Expected (b'list1', b'c'), but got {result}" - result = pika.blpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list2' and result[1] == b'd', f"Expected (b'list2', b'd'), but got {result}" - - # 下面是brpop - # 清空测试环境 - pika.delete('list1', 'list2') - - def brpop_thread1(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list1' and result[1] == b'a', f"Expected (b'list1', b'a'), but got {result}" - client.close() - - thread = threading.Thread(target=brpop_thread1) - thread.start() - # 确保BRPOP已经执行 - time.sleep(0.5) - # 向list1插入元素 - pika.rpush('list1', 'a') - # 等待线程结束 - thread.join() - - def brpop_thread2(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list2' and result[1] == b'b', f"Expected (b'list2', b'b'), but got {result}" - client.close() - - thread = threading.Thread(target=brpop_thread2) - thread.start() - # 确保BRPOP已经执行 - time.sleep(0.5) - # 向list2插入元素 - pika.rpush('list2', 'b') - # 等待线程结束 - thread.join() - - # 提前插入元素 - pika.rpush('list1', 'c') - pika.rpush('list2', 'd') - result = pika.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list1' and result[1] == b'c', f"Expected (b'list1', b'c'), but got {result}" - result = pika.brpop(['list1', 'list2', 'list2', 'list1'], timeout=0) - assert result[0] == b'list2' and result[1] == b'd', f"Expected (b'list2', b'd'), but got {result}" - - pika.close() - print("test_blpop_brpop_same_key_multiple_times Passed [Passed], db:db%d" % (db_)) - - -# 目标list被一条push增加了多个value,先完成多个value的入列再pop -def test_blpop_brpop_variadic_lpush(db_): - print("start test_blpop_brpop_variadic_lpush, db:db%d" % (db_)) - - # 创建Redis客户端 - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - - # 清空测试环境 
- pika.delete('blist') - - def blpop_thread(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == b'bar', f"Expected (b'blist', b'bar'), but got {result}" - client.close() - - # 启动一个线程,执行BLPOP操作 - thread = threading.Thread(target=blpop_thread) - thread.start() - time.sleep(0.5) - - # 使用LPUSH命令向blist插入多个元素 - pika.lpush('blist', 'foo', 'bar') - # lpush完毕后,blist内部顺序:bar foo - # 等待线程结束 - thread.join() - # 检查blist的第一个元素 - assert pika.lindex('blist', 0) == b'foo', "Expected 'foo'" - - # 下面是brpop的测试 - # 清空测试环境 - pika.delete('blist') - - def brpop_thread(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == b'bar', f"Expected (b'blist', b'bar'), but got {result}" - client.close() - - # 启动一个线程,执行BLPOP操作 - thread = threading.Thread(target=brpop_thread) - thread.start() - time.sleep(0.5) - - # 使用LPUSH命令向blist插入多个元素 - pika.rpush('blist', 'foo', 'bar') - # rpush完毕后,blist内部顺序:foo bar - # 等待线程结束 - thread.join() - # 检查blist的第一个元素 - assert pika.lindex('blist', 0) == b'foo', "Expected 'foo'" - print("test_blpop_brpop_variadic_lpush Passed [Passed], db:db%d" % (db_)) - - -# 先被阻塞的先服务/阻塞最久的优先级最高 -def test_serve_priority(db_): - print("start test_serve_priority, db:db%d" % (db_)) - - pika = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - - pika.delete('blist') - - def blpop_thread(expect): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == expect, f"Expected (b'blist', {expect}), but got {result}" - - def brpop_thread(expect): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop('blist', timeout=0) - assert result[0] == b'blist' and result[1] == expect, f"Expected (b'blist', {expect}), but got {result}" - - # blpop测试 - t1 = threading.Thread(target=blpop_thread, args=(b'v1',)) - t1.start() - time.sleep(0.5) - t2 = threading.Thread(target=blpop_thread, args=(b'v2',)) - t2.start() - time.sleep(0.5) - pika.rpush('blist', 'v1', 'v2') - t1.join() - t2.join() - - # brpop测试 - t3 = threading.Thread(target=brpop_thread, args=(b'v4',)) - t3.start() - time.sleep(0.5) - t4 = threading.Thread(target=brpop_thread, args=(b'v3',)) - t4.start() - time.sleep(0.5) - pika.rpush('blist', 'v3', 'v4') - - t3.join() - t4.join() - - pika.close() - print("test_serve_priority Passed [Passed], db:db%d" % (db_)) - - -# 主从复制测试 -def test_master_slave_replication(db_): - print("start test_master_slave_replication, it will cost some time, pls wait, db:db%d" % (db_)) - - master = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - slave = redis.Redis(host=pika_slave_ip, port=int(pika_slave_port), db=db_) - slave.slaveof(pika_instance_ip, pika_instance_port) - time.sleep(25) - master.delete('blist0', 'blist1', 'blist') - - time.sleep(3) - m_keys = master.keys() - s_keys = slave.keys() - assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' - - # 非阻塞的主从复制测试 - def thread1(): - nonlocal master - for i in range(0, 25): - letters = string.ascii_letters - random_str1 = ''.join(random.choice(letters) for _ in range(5)) - random_str2 = ''.join(random.choice(letters) for _ in range(5)) - random_str3 = ''.join(random.choice(letters) for _ in range(5)) - 
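            # Mixed push/pop traffic against the master: pops follow pushes, so the
            # blocking BLPOP/BRPOP calls normally return immediately. After all worker
            # threads finish, the master's and slave's key sets and the contents of
            # 'blist' are compared element by element to check replication.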
master.lpush('blist0', random_str1) - master.rpoplpush('blist0', 'blist') - master.lpush('blist', random_str1, random_str2, random_str3) - master.lpush('blist0', random_str2) - master.rpoplpush('blist0', 'blist') - master.blpop('blist') - master.brpop('blist') - master.rpush('blist', random_str3, random_str2, random_str1) - master.blpop('blist') - master.brpop('blist') - - t1 = threading.Thread(target=thread1) - t2 = threading.Thread(target=thread1) - t3 = threading.Thread(target=thread1) - t4 = threading.Thread(target=thread1) - t5 = threading.Thread(target=thread1) - t6 = threading.Thread(target=thread1) - t1.start() - t2.start() - t3.start() - t4.start() - t5.start() - t6.start() - - t1.join() - t2.join() - t3.join() - t4.join() - t5.join() - t6.join() - time.sleep(3) - m_keys = master.keys() - s_keys = slave.keys() - assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' - - for i in range(0, master.llen('blist')): - assert master.lindex('blist', i) == slave.lindex('blist', i), \ - f"Expected:master.lindex('blist', i) == slave.linex('blist', i), but got False when i = {i}" - - master.delete('blist0', 'blist1') - - # 阻塞的主从复制测试 - def blpop_thread(list_, value_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop(['blist0', 'blist1'], timeout=0) - assert result[0] == list_.encode() and result[ - 1] == value_.encode(), f"Expected: ({list_}, {value_}), but got = {result}" - client.close() - - def blpop_thread1(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.blpop(['blist0', 'blist1'], timeout=0) - client.close() - - def brpop_thread(list_, value_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop(['blist0', 'blist1'], timeout=0) - assert result[0] == list_.encode() and result[ - 1] == value_.encode(), f"Expected: ({list_}, {value_}), but got = {result}" - client.close() - - def brpop_thread1(): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.brpop(['blist0', 'blist1'], timeout=0) - client.close() - - for i in range(0, 5): - letters = string.ascii_letters - random_str1 = ''.join(random.choice(letters) for _ in range(5)) - random_str2 = ''.join(random.choice(letters) for _ in range(5)) - random_str3 = ''.join(random.choice(letters) for _ in range(5)) - random_str4 = ''.join(random.choice(letters) for _ in range(5)) - random_str5 = ''.join(random.choice(letters) for _ in range(5)) - - t1 = threading.Thread(target=blpop_thread, args=('blist1', random_str1,)) - t2 = threading.Thread(target=brpop_thread, args=('blist0', random_str2,)) - t3 = threading.Thread(target=blpop_thread, args=('blist1', random_str3,)) - t4 = threading.Thread(target=brpop_thread, args=('blist0', random_str4,)) - t5 = threading.Thread(target=blpop_thread, args=('blist1', random_str5,)) - - t1.start() - time.sleep(0.5) # 确保阻塞顺序 - t2.start() - time.sleep(0.5) - t3.start() - time.sleep(0.5) - t4.start() - time.sleep(0.5) - t5.start() - time.sleep(0.5) - master.lpush('blist1', random_str1) - time.sleep(0.1) - master.rpush('blist0', random_str2) - time.sleep(0.1) - master.lpush('blist1', random_str3) - time.sleep(0.1) - master.rpush('blist0', random_str4) - time.sleep(0.1) - master.lpush('blist1', random_str5) - - t1.join() - t2.join() - t3.join() - t4.join() - t5.join() - time.sleep(1) - m_keys = master.keys() - s_keys = slave.keys() - assert s_keys == m_keys, f'Expected: 
s_keys == m_keys, but got {s_keys == m_keys}' - for i in range(0, master.llen('blist0')): - assert master.lindex('blist0', i) == slave.lindex('blist0', i), \ - f"Expected:master.lindex('blist0', i) == slave.linex('blist0', i), but got False when i = {i}" - - # 解阻塞过程中高频pop/push, 看binlog是否会乱 - threads1 = [] - for i in range(0, 30): - t1 = threading.Thread(target=blpop_thread1) - t2 = threading.Thread(target=brpop_thread1) - t1.start() - t2.start() - threads1.append(t1) - threads1.append(t2) - - # 此时针对blist0,blist1有60个阻塞,接下来对blist0连续push多次元素(解除阻塞),同时高频pop同被阻塞的client竞争 - def lpop_thread(list): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.lpop(list) - client.close() - - def rpop_thread(list): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - result = client.lpop(list) - client.close() - - def lpush_thread(list_, value1_, value2_, value3_, value4_, value5_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - client.lpush(list_, value1_, value2_, value3_, value4_, value5_) - client.close() - - def rpush_thread(list_, value_, value2_, value3_, value4_, value5_): - client = redis.Redis(host=pika_instance_ip, port=int(pika_instance_port), db=db_) - client.rpush(list_, value_, value2_, value3_, value4_, value5_) - client.close() - - threads2 = [] - for i in range(0, 30): # 每轮push进15个元素,最多pop了9个元素,最少剩下6个元素,所以循环至少要有10次,否则前面的线程不能全部被解阻塞 - letters = string.ascii_letters - random_str1 = ''.join(random.choice(letters) for _ in range(5)) - random_str2 = ''.join(random.choice(letters) for _ in range(5)) - random_str3 = ''.join(random.choice(letters) for _ in range(5)) - random_str4 = ''.join(random.choice(letters) for _ in range(5)) - random_str5 = ''.join(random.choice(letters) for _ in range(5)) - random_str6 = ''.join(random.choice(letters) for _ in range(5)) - random_str7 = ''.join(random.choice(letters) for _ in range(5)) - random_str8 = ''.join(random.choice(letters) for _ in range(5)) - random_str9 = ''.join(random.choice(letters) for _ in range(5)) - t1 = threading.Thread(target=lpush_thread, - args=('blist0', random_str1, random_str2, random_str3, random_str4, random_str5)) - t2 = threading.Thread(target=lpop_thread, args=('blist0',)) - t3 = threading.Thread(target=lpop_thread, args=('blist0',)) - t4 = threading.Thread(target=lpop_thread, args=('blist0',)) - t5 = threading.Thread(target=rpush_thread, - args=('blist0', random_str9, random_str8, random_str7, random_str6, random_str5)) - t6 = threading.Thread(target=rpop_thread, args=('blist0',)) - t7 = threading.Thread(target=rpop_thread, args=('blist0',)) - t8 = threading.Thread(target=rpop_thread, args=('blist0',)) - t9 = threading.Thread(target=rpush_thread, - args=('blist0', random_str7, random_str8, random_str9, random_str1, random_str2)) - t10 = threading.Thread(target=lpop_thread, args=('blist0',)) - t11 = threading.Thread(target=lpop_thread, args=('blist0',)) - t12 = threading.Thread(target=lpop_thread, args=('blist0',)) - - threads2.append(t1) - threads2.append(t2) - threads2.append(t3) - threads2.append(t4) - threads2.append(t5) - threads2.append(t6) - threads2.append(t7) - threads2.append(t8) - threads2.append(t9) - threads2.append(t10) - threads2.append(t11) - threads2.append(t12) - - for t in threads2: - t.start() - - for t in threads1: - t.join() - time.sleep(5) - m_keys = master.keys() - s_keys = slave.keys() - assert s_keys == m_keys, f'Expected: s_keys == m_keys, but got {s_keys == m_keys}' - for i in range(0, 
master.llen('blist0')): - assert master.lindex('blist0', i) == slave.lindex('blist0', i), \ - f"Expected:master.lindex('blist0', i) == slave.linex('blist0', i), but got False when i = {i}" - - master.close() - slave.close() - print("test_master_slave_replication Passed [Passed], db:db%d" % (db_)) - -def test_with_db(db_id): - test_master_slave_replication(db_id) - test_single_existing_list(db_id) - test_blpop_brpop_unblock_lrpush_rpoplpush(db_id) - test_concurrency_block_unblock(db_id) - test_multiple_existing_lists(db_id) - test_blpop_brpop_same_key_multiple_times(db_id) - test_blpop_brpop_variadic_lpush(db_id) - test_serve_priority(db_id) - - -pika_instance_ip = '127.0.0.1' -pika_instance_port = '9221' -pika_slave_ip = '127.0.0.1' -pika_slave_port = '9231' - -# for i in range(0, 100): -#请给主从节点都开启2个db,否则注释掉db1_t相关的行,只做单db测试 -#如果不做主从复制测试,把test_master_slave_replication(db_id)注释掉 -db0_t = threading.Thread(target=test_with_db, args=(0,)) -db1_t = threading.Thread(target=test_with_db, args=(1,)) - -db0_t.start() -db1_t.start() - -db0_t.join() -db1_t.join() - - - -# 待添加的测试: -# 事务与blpop/brpop -# 1 事务内执行blpop/brpop如果没有获取到元素不阻塞,直接返回 -# 2 "BLPOP, LPUSH + DEL should not awake blocked client": 在事务内对一个空list进行了push后又del,当事务结束时list如果依旧是空的,则不应该去服务被阻塞的客户端(事务内的lpush不触发解阻塞动作,而是事务结束才做这个行为 -# redis单测逻辑如下 -# test "BLPOP, LPUSH + DEL should not awake blocked client" { -# set rd [redis_deferring_client] -# r del list -# -# $rd blpop list 0 -# r multi -# r lpush list a -# r del list -# r exec -# r del list -# r lpush list b -# $rd read -# } {list b} - -# 3 "BLPOP, LPUSH + DEL + SET should not awake blocked client": 这个测试用例与上一个类似,但在删除列表后,还使用SET命令将这个列表设置为一个字符串。 -# redis单测逻辑如下 -# test "BLPOP, LPUSH + DEL + SET should not awake blocked client" { -# set rd [redis_deferring_client] -# r del list -# -# $rd blpop list 0 -# r multi -# r lpush list a -# r del list -# r set list foo -# r exec -# r del list -# r lpush list b -# $rd read -# } {list b} - -# 4 "MULTI/EXEC is isolated from the point of view of BLPOP": 这个测试用例检查了在使用BLPOP命令阻塞等待一个列表的元素时,如果在此期间在一个Redis事务中向这个列表推入多个元素,阻塞的客户端应该只会接收到事务执行前的列表状态。 -# redis单测逻辑如下 -# test "MULTI/EXEC is isolated from the point of view of BLPOP" { -# set rd [redis_deferring_client] -# r del list -# $rd blpop list 0 -# r multi -# r lpush list a -# r lpush list b -# r lpush list c -# r exec -# $rd read -# } {list c} diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl deleted file mode 100644 index b7e3c51ca..000000000 --- a/tests/unit/acl.tcl +++ /dev/null @@ -1,1135 +0,0 @@ -start_server {tags {"acl external:skip"}} { - test {Connections start with the default user} { - r ACL WHOAMI - } {default} - - test {It is possible to create new users} { - r ACL setuser newuser - } - - test {Coverage: ACL USERS} { - r ACL USERS - } {default limit newuser} - - test {Usernames can not contain spaces or null characters} { - catch {r ACL setuser "a a"} err - set err - } {*Usernames can't contain spaces or null characters*} - - test {New users start disabled} { - r ACL setuser newuser >passwd1 - catch {r AUTH newuser passwd1} err - set err - } {*WRONGPASS*} - - test {Enabling the user allows the login} { - r ACL setuser newuser on +acl - r AUTH newuser passwd1 - r ACL WHOAMI - } {newuser} - - test {Only the set of correct passwords work} { - r ACL setuser newuser >passwd2 - catch {r AUTH newuser passwd1} e - assert {$e eq "OK"} - catch {r AUTH newuser passwd2} e - assert {$e eq "OK"} - catch {r AUTH newuser passwd3} e - set e - } {*WRONGPASS*} - - test {It is possible to remove passwords from the set of valid 
ones} { - r ACL setuser newuser pspass +acl +client +@pubsub - r AUTH psuser pspass - catch {r PUBLISH foo bar} e - set e - } {*NOPERM*channel*} - - test {By default, only default user is able to subscribe to any channel} { - set rd [redis_deferring_client] - $rd AUTH default pwd - $rd read - $rd SUBSCRIBE foo - assert_match {subscribe foo 1} [$rd read] - $rd UNSUBSCRIBE - $rd read - $rd AUTH psuser pspass - $rd read - $rd SUBSCRIBE foo - catch {$rd read} e - $rd close - set e - } {*NOPERM*channel*} - - test {By default, only default user is able to subscribe to any pattern} { - set rd [redis_deferring_client] - $rd AUTH default pwd - $rd read - $rd PSUBSCRIBE bar* - assert_match {psubscribe bar\* 1} [$rd read] - $rd PUNSUBSCRIBE - $rd read - $rd AUTH psuser pspass - $rd read - $rd PSUBSCRIBE bar* - catch {$rd read} e - $rd close - set e - } {*NOPERM*channel*} - - test {It's possible to allow publishing to a subset of channels} { - r ACL setuser psuser resetchannels &foo:1 &bar:* - assert_equal {0} [r PUBLISH foo:1 somemessage] - assert_equal {0} [r PUBLISH bar:2 anothermessage] - catch {r PUBLISH zap:3 nosuchmessage} e - set e - } {*NOPERM*channel*} - - test {Validate subset of channels is prefixed with resetchannels flag} { - r ACL setuser hpuser on nopass resetchannels &foo +@all - - # Verify resetchannels flag is prefixed before the channel name(s) - set users [r ACL LIST] - set curruser "hpuser" - - # authenticate as hpuser - r AUTH hpuser pass - - assert_equal {0} [r PUBLISH foo bar] - catch {r PUBLISH bar game} e - - # Falling back to psuser for the below tests - r AUTH psuser pspass - r ACL deluser hpuser - set e - } {*NOPERM*channel*} - - test {In transaction queue publish/subscribe/psubscribe to unauthorized channel will fail} { - r ACL setuser psuser +multi +discard - r MULTI - assert_error {*NOPERM*channel*} {r PUBLISH notexits helloworld} - r DISCARD - r MULTI - assert_error {*NOPERM*channel*} {r SUBSCRIBE notexits foo:1} - r DISCARD - r MULTI - assert_error {*NOPERM*channel*} {r PSUBSCRIBE notexits:* bar:*} - r DISCARD - } - - test {It's possible to allow subscribing to a subset of channels} { - set rd [redis_deferring_client] - $rd AUTH psuser pspass - $rd read - $rd SUBSCRIBE foo:1 - assert_match {subscribe foo:1 1} [$rd read] - $rd SUBSCRIBE bar:2 - assert_match {subscribe bar:2 2} [$rd read] - $rd SUBSCRIBE zap:3 - catch {$rd read} e - set e - } {*NOPERM*channel*} - -# test {It's possible to allow subscribing to a subset of shard channels} { -# set rd [redis_deferring_client] -# $rd AUTH psuser pspass -# $rd read -# $rd SSUBSCRIBE foo:1 -# assert_match {ssubscribe foo:1 1} [$rd read] -# $rd SSUBSCRIBE bar:2 -# assert_match {ssubscribe bar:2 2} [$rd read] -# $rd SSUBSCRIBE zap:3 -# catch {$rd read} e -# set e -# } {*NOPERM*channel*} - - test {It's possible to allow subscribing to a subset of channel patterns} { - set rd [redis_deferring_client] - $rd AUTH psuser pspass - $rd read - $rd PSUBSCRIBE foo:1 - assert_match {psubscribe foo:1 1} [$rd read] - $rd PSUBSCRIBE bar:* - assert_match {psubscribe bar:\* 2} [$rd read] - $rd PSUBSCRIBE bar:baz - catch {$rd read} e - set e - } {*NOPERM*channel*} - - test {Subscribers are killed when revoked of channel permission} { - set rd [redis_deferring_client] - r ACL setuser psuser resetchannels &foo:1 - $rd AUTH psuser pspass - $rd read - $rd CLIENT SETNAME deathrow - $rd read - $rd SUBSCRIBE foo:1 - $rd read - r ACL setuser psuser resetchannels - assert_no_match {*deathrow*} [r CLIENT LIST] - $rd close - } {0} - -# test {Subscribers 
are killed when revoked of channel permission} { -# set rd [redis_deferring_client] -# r ACL setuser psuser resetchannels &foo:1 -# $rd AUTH psuser pspass -# $rd read -# $rd CLIENT SETNAME deathrow -# $rd read -# $rd SSUBSCRIBE foo:1 -# $rd read -# r ACL setuser psuser resetchannels -# assert_no_match {*deathrow*} [r CLIENT LIST] -# $rd close -# } {0} - - test {Subscribers are killed when revoked of pattern permission} { - set rd [redis_deferring_client] - r ACL setuser psuser resetchannels &bar:* - $rd AUTH psuser pspass - $rd read - $rd CLIENT SETNAME deathrow - $rd read - $rd PSUBSCRIBE bar:* - $rd read - r ACL setuser psuser resetchannels - assert_no_match {*deathrow*} [r CLIENT LIST] - $rd close - } {0} - - test {Subscribers are killed when revoked of allchannels permission} { - set rd [redis_deferring_client] - r ACL setuser psuser allchannels - $rd AUTH psuser pspass - $rd read - $rd CLIENT SETNAME deathrow - $rd read - $rd PSUBSCRIBE foo - $rd read - r ACL setuser psuser resetchannels - assert_no_match {*deathrow*} [r CLIENT LIST] - $rd close - } {0} - -# test {Subscribers are pardoned if literal permissions are retained and/or gaining allchannels} { -# set rd [redis_deferring_client] -# r ACL setuser psuser resetchannels &foo:1 &bar:* &orders -# $rd AUTH psuser pspass -# $rd read -# $rd CLIENT SETNAME pardoned -# $rd read -# $rd SUBSCRIBE foo:1 -# $rd read -# $rd SSUBSCRIBE orders -# $rd read -# $rd PSUBSCRIBE bar:* -# $rd read -# r ACL setuser psuser resetchannels &foo:1 &bar:* &orders &baz:qaz &zoo:* -# assert_match {*pardoned*} [r CLIENT LIST] -# r ACL setuser psuser allchannels -# assert_match {*pardoned*} [r CLIENT LIST] -# $rd close -# } {0} - -### -# test {blocked command gets rejected when reprocessed after permission change} { -# r auth default "" -# r config resetstat -# set rd [redis_deferring_client] -# r ACL setuser psuser reset on nopass +@all allkeys -# $rd AUTH psuser pspass -# $rd read -# $rd BLPOP list1 0 -# wait_for_blocked_client -# r ACL setuser psuser resetkeys -# r LPUSH list1 foo -# assert_error {*NOPERM No permissions to access a key*} {$rd read} -# $rd ping -# $rd close -# assert_match {*calls=0,usec=0,*,rejected_calls=1,failed_calls=0} [cmdrstat blpop r] -# } - - test {Users can be configured to authenticate with any password} { - r ACL setuser newuser nopass - r AUTH newuser zipzapblabla - } {OK} - - test {ACLs can exclude single commands} { - r ACL setuser newuser -ping - r INCR mycounter ; # Should not raise an error - catch {r PING} e - set e - } {*NOPERM*ping*} - - test {ACLs can include or exclude whole classes of commands} { - r ACL setuser newuser -@all +@set +acl - r SADD myset a b c; # Should not raise an error - r ACL setuser newuser +@all -@string - r SADD myset a b c; # Again should not raise an error - # String commands instead should raise an error - catch {r SET foo bar} e - r ACL setuser newuser allcommands; # Undo commands ACL - set e - } {*NOPERM*set*} - - test {ACLs can include single subcommands} { - r ACL setuser newuser +@all -client - r ACL setuser newuser +client|setname - set cmdstr [dict get [r ACL getuser newuser] commands] - #assert_match {+@all*-client*+client|id*} $cmdstr - assert_match {+@all*-client*+client|setname*} $cmdstr - #r CLIENT ID; # Should not fail - r CLIENT SETNAME foo ; # Should not fail - catch {r CLIENT KILL ALL} e - set e - } {*NOPERM*client|kill*} - - test {ACLs can exclude single subcommands, case 1} { - r ACL setuser newuser +@all -client|kill - set cmdstr [dict get [r ACL getuser newuser] commands] - 
assert_equal {+@all -client|kill} $cmdstr - #r CLIENT ID; # Should not fail - r CLIENT SETNAME foo ; # Should not fail - catch {r CLIENT KILL all} e - set e - } {*NOPERM*client|kill*} - - test {ACLs can exclude single subcommands, case 2} { - r ACL setuser newuser -@all +acl +config -config|set - set cmdstr [dict get [r ACL getuser newuser] commands] - assert_match {*+config*} $cmdstr - assert_match {*-config|set*} $cmdstr - r CONFIG GET loglevel; # Should not fail - catch {r CONFIG SET loglevel debug} e - set e - } {*NOPERM*config|set*} - - test {ACLs cannot include a subcommand with a specific arg} { - r ACL setuser newuser +@all -config|get - catch { r ACL setuser newuser +config|get|appendonly} e - set e - } {*Allowing first-arg of a subcommand is not supported*} - - test {ACLs cannot exclude or include a container commands with a specific arg} { - r ACL setuser newuser +@all +config|get - catch { r ACL setuser newuser +@all +config|asdf} e - assert_match "*Unknown command or category name in ACL*" $e - catch { r ACL setuser newuser +@all -config|asdf} e - assert_match "*Unknown command or category name in ACL*" $e - } {} - -# test {ACLs cannot exclude or include a container command with two args} { -# r ACL setuser newuser +@all +config|get -# catch { r ACL setuser newuser +@all +get|key1|key2} e -# assert_match "*Unknown command or category name in ACL*" $e -# catch { r ACL setuser newuser +@all -get|key1|key2} e -# assert_match "*Unknown command or category name in ACL*" $e -# } {} - -# now pika not supported the command -# test {ACLs including of a type includes also subcommands} { -# r ACL setuser newuser -@all +del +acl +@stream -# r DEL key -# r XADD key * field value -# r XINFO STREAM key -# } - -# test {ACLs can block all DEBUG subcommands except one} { -# r ACL setuser newuser -@all +acl +del +incr +debug|object -# r DEL key -# set cmdstr [dict get [r ACL getuser newuser] commands] -# assert_match {*+debug|object*} $cmdstr -# r INCR key -# r DEBUG OBJECT key -# catch {r DEBUG SEGFAULT} e -# set e -# } {*NOPERM*debug*} - -# test {ACLs set can include subcommands, if already full command exists} { -# r ACL setuser bob +memory|doctor -# set cmdstr [dict get [r ACL getuser bob] commands] -# assert_equal {-@all +memory|doctor} $cmdstr -# -# # Validate the commands have got engulfed to +memory. -# r ACL setuser bob +memory -# set cmdstr [dict get [r ACL getuser bob] commands] -# assert_equal {-@all +memory} $cmdstr -# -# # Appending to the existing access string of bob. -# r ACL setuser bob +@all +client|id -# # Although this does nothing, we retain it anyways so we can reproduce -# # the original ACL. -# set cmdstr [dict get [r ACL getuser bob] commands] -# assert_equal {+@all +client|id} $cmdstr -# -# r ACL setuser bob >passwd1 on -# r AUTH bob passwd1 -# r CLIENT ID; # Should not fail -# r MEMORY DOCTOR; # Should not fail -# } - -# now pika not supported the command -# test {ACLs set can exclude subcommands, if already full command exists} { -# r ACL setuser alice +@all -memory|doctor -# set cmdstr [dict get [r ACL getuser alice] commands] -# assert_equal {+@all -memory|doctor} $cmdstr -# -# r ACL setuser alice >passwd1 on -# r AUTH alice passwd1 -# -# assert_error {*NOPERM*memory|doctor*} {r MEMORY DOCTOR} -# r MEMORY STATS ;# should work -# -# # Validate the commands have got engulfed to -memory. 
-# r ACL setuser alice +@all -memory -# set cmdstr [dict get [r ACL getuser alice] commands] -# assert_equal {+@all -memory} $cmdstr -# -# assert_error {*NOPERM*memory|doctor*} {r MEMORY DOCTOR} -# assert_error {*NOPERM*memory|stats*} {r MEMORY STATS} -# -# # Appending to the existing access string of alice. -# r ACL setuser alice -@all -# -# # Now, alice can't do anything, we need to auth newuser to execute ACL GETUSER -# r AUTH newuser passwd1 -# -# # Validate the new commands has got engulfed to -@all. -# set cmdstr [dict get [r ACL getuser alice] commands] -# assert_equal {-@all} $cmdstr -# -# r AUTH alice passwd1 -# -# assert_error {*NOPERM*get*} {r GET key} -# assert_error {*NOPERM*memory|stats*} {r MEMORY STATS} -# -# # Auth newuser before the next test -# r AUTH newuser passwd1 -# } - - test {ACL SETUSER RESET reverting to default newly created user} { - set current_user "example" - r ACL DELUSER $current_user - r ACL SETUSER $current_user - - set users [r ACL LIST] - foreach user [lshuffle $users] { - if {[string first $current_user $user] != -1} { - set current_user_output $user - } - } - - r ACL SETUSER $current_user reset - set users [r ACL LIST] - foreach user [lshuffle $users] { - if {[string first $current_user $user] != -1} { - assert_equal $current_user_output $user - } - } - } - - # Note that the order of the generated ACL rules is not stable in Redis - # so we need to match the different parts and not as a whole string. - test {ACL GETUSER is able to translate back command permissions} { - # Subtractive - # r ACL setuser newuser reset +@all ~* -@string +incr -debug +debug|digest - r ACL setuser newuser reset +@all ~* -@string +incr - set cmdstr [dict get [r ACL getuser newuser] commands] - assert_match {*+@all*} $cmdstr - assert_match {*-@string*} $cmdstr - assert_match {*+incr*} $cmdstr - #assert_match {*-debug +debug|digest**} $cmdstr - - # Additive - #r ACL setuser newuser reset +@string -incr +acl +debug|digest +debug|segfault - r ACL setuser newuser reset +@string -incr +acl - set cmdstr [dict get [r ACL getuser newuser] commands] - assert_match {*-@all*} $cmdstr - assert_match {*+@string*} $cmdstr - assert_match {*-incr*} $cmdstr - # {*+debug|digest*} $cmdstr - #assert_match {*+debug|segfault*} $cmdstr - assert_match {*+acl*} $cmdstr - } - - # A regression test make sure that as long as there is a simple - # category defining the commands, that it will be used as is. - test {ACL GETUSER provides reasonable results} { - set categories [r ACL CAT] - - # Test that adding each single category will - # result in just that category with both +@all and -@all - foreach category $categories { - # Test for future commands where allowed - r ACL setuser additive reset +@all "-@$category" - set cmdstr [dict get [r ACL getuser additive] commands] - assert_equal "+@all -@$category" $cmdstr - - # Test for future commands where disallowed - r ACL setuser restrictive reset -@all "+@$category" - set cmdstr [dict get [r ACL getuser restrictive] commands] - assert_equal "-@all +@$category" $cmdstr - } - } - - # Test that only lossless compaction of ACLs occur. 
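The compaction test that follows asserts that command rules are reported back by ACL GETUSER without losing information. A rough redis-py equivalent of that round-trip, for illustration only (the host, port and the exact shape of the GETUSER reply are assumptions and vary by server and client version):

import redis

r = redis.Redis(host="127.0.0.1", port=9221, decode_responses=True)

# Apply a rule set that mixes category grants with a single-command grant...
r.execute_command("ACL", "SETUSER", "adv-test", "reset", "+@all", "-@hash", "-@slow", "+hget")

# ...and read it back: a lossless server reports an equivalent rule string,
# e.g. "+@all -@hash -@slow +hget", instead of collapsing or reordering it away.
reply = r.execute_command("ACL", "GETUSER", "adv-test")
print(reply)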
- test {ACL GETUSER provides correct results} { - r ACL SETUSER adv-test - r ACL SETUSER adv-test +@all -@hash -@slow +hget - assert_equal "+@all -@hash -@slow +hget" [dict get [r ACL getuser adv-test] commands] - - # Categories are re-ordered if re-added - r ACL SETUSER adv-test -@hash - assert_equal "+@all -@slow +hget -@hash" [dict get [r ACL getuser adv-test] commands] - - # Inverting categories removes existing categories - r ACL SETUSER adv-test +@hash - assert_equal "+@all -@slow +hget +@hash" [dict get [r ACL getuser adv-test] commands] - - # Inverting the all category compacts everything - r ACL SETUSER adv-test -@all - assert_equal "-@all" [dict get [r ACL getuser adv-test] commands] - r ACL SETUSER adv-test -@string -@slow +@all - assert_equal "+@all" [dict get [r ACL getuser adv-test] commands] - - # Make sure categories are case insensitive - r ACL SETUSER adv-test -@all +@HASH +@hash +@HaSh - assert_equal "-@all +@hash" [dict get [r ACL getuser adv-test] commands] - - # Make sure commands are case insensitive - r ACL SETUSER adv-test -@all +HGET +hget +hGeT - assert_equal "-@all +hget" [dict get [r ACL getuser adv-test] commands] - - # Arbitrary category additions and removals are handled - r ACL SETUSER adv-test -@all +@hash +@slow +@set +@set +@slow +@hash - assert_equal "-@all +@set +@slow +@hash" [dict get [r ACL getuser adv-test] commands] - - # Arbitrary command additions and removals are handled - r ACL SETUSER adv-test -@all +hget -hset +hset -hget - assert_equal "-@all +hset -hget" [dict get [r ACL getuser adv-test] commands] - - # Arbitrary subcommands are compacted - r ACL SETUSER adv-test -@all +client|list +client|list +config|get +config +acl|list -acl - assert_equal "-@all +client|list +config -acl" [dict get [r ACL getuser adv-test] commands] - - # Unnecessary categories are retained for potentional future compatibility (pika not supported `dangerous`) - #r ACL SETUSER adv-test -@all -@dangerous - #assert_equal "-@all -@dangerous" [dict get [r ACL getuser adv-test] commands] - - # Duplicate categories are compressed, regression test for #12470 - r ACL SETUSER adv-test -@all +config +config|get -config|set +config - assert_equal "-@all +config" [dict get [r ACL getuser adv-test] commands] - } - - test "ACL CAT with illegal arguments" { - assert_error {*Unknown category 'NON_EXISTS'} {r ACL CAT NON_EXISTS} - assert_error {*unknown subcommand or wrong number of arguments for 'CAT'*} {r ACL CAT NON_EXISTS NON_EXISTS2} - } - - test "ACL CAT without category - list all categories" { - set categories [r acl cat] - assert_not_equal [lsearch $categories "keyspace"] -1 - assert_not_equal [lsearch $categories "connection"] -1 - } - - test "ACL CAT category - list all commands/subcommands that belong to category" { - # now pika not supported the command - #assert_not_equal [lsearch [r acl cat transaction] "multi"] -1 - #assert_not_equal [lsearch [r acl cat scripting] "function|list"] -1 - - # Negative check to make sure it doesn't actually return all commands. 
- assert_equal [lsearch [r acl cat keyspace] "set"] -1 - #assert_equal [lsearch [r acl cat stream] "get"] -1 - } - -# now pika not supported the command -# test "ACL requires explicit permission for scripting for EVAL_RO, EVALSHA_RO and FCALL_RO" { -# r ACL SETUSER scripter on nopass +readonly -# assert_match {*has no permissions to run the 'eval_ro' command*} [r ACL DRYRUN scripter EVAL_RO "" 0] -# assert_match {*has no permissions to run the 'evalsha_ro' command*} [r ACL DRYRUN scripter EVALSHA_RO "" 0] -# assert_match {*has no permissions to run the 'fcall_ro' command*} [r ACL DRYRUN scripter FCALL_RO "" 0] -# } - -# now pika not supported the command -# test {ACL #5998 regression: memory leaks adding / removing subcommands} { -# r AUTH default "" -# r ACL setuser newuser reset -debug +debug|a +debug|b +debug|c -# r ACL setuser newuser -debug -# # The test framework will detect a leak if any. -# } - -# now pika not supported the command -# test {ACL LOG aggregates similar errors together and assigns unique entry-id to new errors} { -# r ACL LOG RESET -# r ACL setuser user1 >foo -# assert_error "*WRONGPASS*" {r AUTH user1 doo} -# set entry_id_initial_error [dict get [lindex [r ACL LOG] 0] entry-id] -# set timestamp_created_original [dict get [lindex [r ACL LOG] 0] timestamp-created] -# set timestamp_last_update_original [dict get [lindex [r ACL LOG] 0] timestamp-last-updated] -# after 1 -# for {set j 0} {$j < 10} {incr j} { -# assert_error "*WRONGPASS*" {r AUTH user1 doo} -# } -# set entry_id_lastest_error [dict get [lindex [r ACL LOG] 0] entry-id] -# set timestamp_created_updated [dict get [lindex [r ACL LOG] 0] timestamp-created] -# set timestamp_last_updated_after_update [dict get [lindex [r ACL LOG] 0] timestamp-last-updated] -# assert {$entry_id_lastest_error eq $entry_id_initial_error} -# assert {$timestamp_last_update_original < $timestamp_last_updated_after_update} -# assert {$timestamp_created_original eq $timestamp_created_updated} -# r ACL setuser user2 >doo -# assert_error "*WRONGPASS*" {r AUTH user2 foo} -# set new_error_entry_id [dict get [lindex [r ACL LOG] 0] entry-id] -# assert {$new_error_entry_id eq $entry_id_lastest_error + 1 } -# } -# - test {ACL LOG shows failed command executions at toplevel} { - r ACL LOG RESET - r ACL setuser antirez >foo on +set ~object:1234 - r ACL setuser antirez +multi +exec - r ACL setuser antirez resetchannels +publish - r AUTH antirez foo - assert_error "*NOPERM*get*" {r GET foo} - r AUTH default "" - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry username] eq {antirez}} - assert {[dict get $entry context] eq {toplevel}} - assert {[dict get $entry reason] eq {command}} - assert {[dict get $entry object] eq {get}} - assert_match {*cmd=get*} [dict get $entry client-info] - } - -# test "ACL LOG shows failed subcommand executions at toplevel" { -# r ACL LOG RESET -# r ACL DELUSER demo -# r ACL SETUSER demo on nopass -# r AUTH demo "" -# assert_error "*NOPERM*script|help*" {r SCRIPT HELP} -# r AUTH default "" -# set entry [lindex [r ACL LOG] 0] -# assert_equal [dict get $entry username] {demo} -# assert_equal [dict get $entry context] {toplevel} -# assert_equal [dict get $entry reason] {command} -# assert_equal [dict get $entry object] {script|help} -# } - - test {ACL LOG is able to test similar events} { - r ACL LOG RESET - r AUTH antirez foo - catch {r GET foo} - catch {r GET foo} - catch {r GET foo} - r AUTH default "" - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry count] == 3} - } - - test {ACL LOG is able to log 
keys access violations and key name} { - r AUTH antirez foo - catch {r SET somekeynotallowed 1234} - r AUTH default "" - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry reason] eq {key}} - assert {[dict get $entry object] eq {somekeynotallowed}} - } - - test {ACL LOG is able to log channel access violations and channel name} { - r AUTH antirez foo - catch {r PUBLISH somechannelnotallowed nullmsg} - r AUTH default "" - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry reason] eq {channel}} - assert {[dict get $entry object] eq {somechannelnotallowed}} - } - - test {ACL LOG RESET is able to flush the entries in the log} { - r ACL LOG RESET - assert {[llength [r ACL LOG]] == 0} - } - - test {ACL LOG can distinguish the transaction context (1)} { - r AUTH antirez foo - r MULTI - catch {r INCR foo} - catch {r EXEC} - r AUTH default "" - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry context] eq {multi}} - assert {[dict get $entry object] eq {incr}} - } - - test {ACL LOG can distinguish the transaction context (2)} { - set rd1 [redis_deferring_client] - r ACL SETUSER antirez +incr - - r AUTH antirez foo - r MULTI - r INCR object:1234 - $rd1 ACL SETUSER antirez -incr - $rd1 read - catch {r EXEC} - $rd1 close - r AUTH default "" - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry context] eq {multi}} - assert {[dict get $entry object] eq {incr}} - r ACL SETUSER antirez -incr - } - -# now pika not supported lua command -# test {ACL can log errors in the context of Lua scripting} { -# r AUTH antirez foo -# catch {r EVAL {redis.call('incr','foo')} 0} -# r AUTH default "" -# set entry [lindex [r ACL LOG] 0] -# assert {[dict get $entry context] eq {lua}} -# assert {[dict get $entry object] eq {incr}} -# assert_match {*cmd=eval*} [dict get $entry client-info] -# } - - test {ACL LOG can accept a numerical argument to show less entries} { - r AUTH antirez foo - catch {r INCR foo} - catch {r INCR foo} - catch {r INCR foo} - catch {r INCR foo} - r AUTH default "" - assert {[llength [r ACL LOG]] > 1} - assert {[llength [r ACL LOG 2]] == 2} - } - - test {ACL LOG can log failed auth attempts} { - catch {r AUTH antirez wrong-password} - set entry [lindex [r ACL LOG] 0] - assert {[dict get $entry context] eq {toplevel}} - assert {[dict get $entry reason] eq {auth}} - assert {[dict get $entry object] eq {AUTH}} - assert {[dict get $entry username] eq {antirez}} - } - - test {ACL LOG entries are limited to a maximum amount} { - r ACL LOG RESET - r CONFIG SET acllog-max-len 5 - r AUTH antirez foo - for {set j 0} {$j < 10} {incr j} { - catch {r SET obj:$j 123} - } - r AUTH default "" - assert {[llength [r ACL LOG]] == 5} - } - -# test {When default user is off, new connections are not authenticated} { -# r ACL setuser default off -# catch {set rd1 [redis_deferring_client]} e -# r ACL setuser default on -# set e -# } {*NOAUTH*} - - test {When default user has no command permission, hello command still works for other users} { - r ACL setuser secure-user >supass on +@all - r ACL setuser default -@all - r HELLO 2 AUTH secure-user supass - r ACL setuser default nopass +@all - r AUTH default "" - } - - test {When an authentication chain is used in the HELLO cmd, the last auth cmd has precedence} { - r ACL setuser secure-user1 >supass on +@all - r ACL setuser secure-user2 >supass on +@all - r HELLO 2 AUTH secure-user supass AUTH secure-user2 supass AUTH secure-user1 supass - assert_equal [r ACL whoami] {secure-user1} - catch {r HELLO 2 AUTH secure-user supass AUTH secure-user2 
supass AUTH secure-user pass} e - assert_match "WRONGPASS invalid username-password pair or user is disabled." $e - assert_equal [r ACL whoami] {secure-user2} - } - - test {When a setname chain is used in the HELLO cmd, the last setname cmd has precedence} { - r HELLO 2 setname client1 setname client2 setname client3 setname client4 - assert_equal [r client getname] {client4} - catch {r HELLO 2 setname client5 setname client6 setname "client name"} e - assert_match "ERR Client names cannot contain spaces, newlines or special characters." $e - assert_equal [r client getname] {client6} - } - - test {When authentication fails in the HELLO cmd, the client setname should not be applied} { - r client setname client0 - catch {r HELLO 2 AUTH user pass setname client1} e - assert_match "WRONGPASS invalid username-password pair or user is disabled." $e - assert {[r client getname] eq {client0}} - } - - test {ACL HELP should not have unexpected options} { - catch {r ACL help xxx} e - assert_match "*wrong number of arguments for 'acl|help' command" $e - } - - test {Delete a user that the client doesn't use} { - r ACL setuser not_used on >passwd - assert {[r ACL deluser not_used] == 1} - # The client is not closed - assert {[r ping] eq {PONG}} - } - - test {Delete a user that the client is using} { - r ACL setuser using on +acl >passwd - r AUTH using passwd - # The client will receive reply normally - assert {[r ACL deluser using] == 1} - # The client is closed - catch {[r ping]} e - assert_match "*I/O error*" $e - } - - test {ACL GENPASS command failed test} { - catch {r ACL genpass -236} err1 - catch {r ACL genpass 5000} err2 - assert_match "*ACL GENPASS argument must be the number*" $err1 - assert_match "*ACL GENPASS argument must be the number*" $err2 - } - - test {Default user can not be removed} { - catch {r ACL deluser default} err - set err - } {ERR The 'default' user cannot be removed} - - test {ACL load non-existing configured ACL file} { - catch {r ACL load} err - set err - } {*not configured to use an ACL file*} - - # If there is an AUTH failure the metric increases -# test {ACL-Metrics user AUTH failure} { -# set current_auth_failures [s acl_access_denied_auth] -# set current_invalid_cmd_accesses [s acl_access_denied_cmd] -# set current_invalid_key_accesses [s acl_access_denied_key] -# set current_invalid_channel_accesses [s acl_access_denied_channel] -# assert_error "*WRONGPASS*" {r AUTH notrealuser 1233456} -# assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 1]} -# assert_error "*WRONGPASS*" {r HELLO 3 AUTH notrealuser 1233456} -# assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 2]} -# assert_error "*WRONGPASS*" {r HELLO 2 AUTH notrealuser 1233456} -# assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 3]} -# assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses} -# assert {[s acl_access_denied_key] eq $current_invalid_key_accesses} -# assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses} -# } -# -# # If a user try to access an unauthorized command the metric increases -# test {ACL-Metrics invalid command accesses} { -# set current_auth_failures [s acl_access_denied_auth] -# set current_invalid_cmd_accesses [s acl_access_denied_cmd] -# set current_invalid_key_accesses [s acl_access_denied_key] -# set current_invalid_channel_accesses [s acl_access_denied_channel] -# r ACL setuser invalidcmduser on >passwd nocommands -# r AUTH invalidcmduser passwd -# assert_error "*no permissions to run the * 
command*" {r acl list} -# r AUTH default "" -# assert {[s acl_access_denied_auth] eq $current_auth_failures} -# assert {[s acl_access_denied_cmd] eq [expr $current_invalid_cmd_accesses + 1]} -# assert {[s acl_access_denied_key] eq $current_invalid_key_accesses} -# assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses} -# } -# -# # If a user try to access an unauthorized key the metric increases -# test {ACL-Metrics invalid key accesses} { -# set current_auth_failures [s acl_access_denied_auth] -# set current_invalid_cmd_accesses [s acl_access_denied_cmd] -# set current_invalid_key_accesses [s acl_access_denied_key] -# set current_invalid_channel_accesses [s acl_access_denied_channel] -# r ACL setuser invalidkeyuser on >passwd resetkeys allcommands -# r AUTH invalidkeyuser passwd -# assert_error "*NOPERM*key*" {r get x} -# r AUTH default "" -# assert {[s acl_access_denied_auth] eq $current_auth_failures} -# assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses} -# assert {[s acl_access_denied_key] eq [expr $current_invalid_key_accesses + 1]} -# assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses} -# } -# -# # If a user try to access an unauthorized channel the metric increases -# test {ACL-Metrics invalid channels accesses} { -# set current_auth_failures [s acl_access_denied_auth] -# set current_invalid_cmd_accesses [s acl_access_denied_cmd] -# set current_invalid_key_accesses [s acl_access_denied_key] -# set current_invalid_channel_accesses [s acl_access_denied_channel] -# r ACL setuser invalidchanneluser on >passwd resetchannels allcommands -# r AUTH invalidkeyuser passwd -# assert_error "*NOPERM*channel*" {r subscribe x} -# r AUTH default "" -# assert {[s acl_access_denied_auth] eq $current_auth_failures} -# assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses} -# assert {[s acl_access_denied_key] eq $current_invalid_key_accesses} -# assert {[s acl_access_denied_channel] eq [expr $current_invalid_channel_accesses + 1]} -# } -} - -set server_path [tmpdir "server.acl"] -set base_path ${server_path}/ -exec cp -f tests/assets/user.acl $base_path -set acl_file ${base_path}user.acl -start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allchannels" "aclfile" $acl_file ] tags [list "external:skip"]] { - # user alice on allcommands allkeys &* >alice - # user bob on -@all +@set +acl ~set* &* >bob - # user default on nopass ~* &* +@all - - test {default: load from include file, can access any channels} { - r SUBSCRIBE foo - r PSUBSCRIBE bar* - r UNSUBSCRIBE - r PUNSUBSCRIBE - r PUBLISH hello world - } - - test {default: with config acl-pubsub-default allchannels after reset, can access any channels} { - r ACL setuser default reset on nopass ~* +@all - r SUBSCRIBE foo - r PSUBSCRIBE bar* - r UNSUBSCRIBE - r PUNSUBSCRIBE - r PUBLISH hello world - } - - test {default: with config acl-pubsub-default resetchannels after reset, can not access any channels} { - r CONFIG SET acl-pubsub-default resetchannels - r ACL setuser default reset on nopass ~* +@all - assert_error {*NOPERM*channel*} {r SUBSCRIBE foo} - assert_error {*NOPERM*channel*} {r PSUBSCRIBE bar*} - assert_error {*NOPERM*channel*} {r PUBLISH hello world} - r CONFIG SET acl-pubsub-default resetchannels - } - - test {Alice: can execute all command} { - r AUTH alice alice - assert_equal "alice" [r acl whoami] - r SET key value - } - - test {Bob: just execute @set and acl command} { - r AUTH bob bob - assert_equal "bob" [r acl whoami] - # The test was 
passed on local machine, Restarting the pika data will still exist, - # which may cause the test to fail, so remove it - #assert_equal "3" [r sadd set 1 2 3] - catch {r SET key value} e - set e - } {*NOPERM*set*} - - test {ACL load and save} { - r ACL setuser eve +get allkeys >eve on - r ACL save - - # ACL load will free user and kill clients - r ACL load - catch {r ACL LIST} e - assert_match {*I/O error*} $e - - reconnect - r AUTH alice alice - r SET key value - r AUTH eve eve - r GET key - catch {r SET key value} e - set e - } {*NOPERM*set*} - - test {ACL load and save with restricted channels} { - r AUTH alice alice - r ACL setuser harry on nopass resetchannels &test +@all ~* - r ACL save - - # ACL load will free user and kill clients - r ACL load - catch {r ACL LIST} e - assert_match {*I/O error*} $e - - reconnect - r AUTH harry anything - r publish test bar - catch {r publish test1 bar} e - r ACL deluser harry - set e - } {*NOPERM*channel*} -} - -set server_path [tmpdir "resetchannels.acl"] -set base_path ${server_path}/ -exec cp -f tests/assets/nodefaultuser.acl $base_path -exec cp -f tests/assets/default.conf $server_path -set acl_file ${base_path}nodefaultuser.acl -start_server [list overrides [list "dir" $server_path "aclfile" $acl_file] tags [list "external:skip"]] { - - test {Default user has access to all channels irrespective of flag} { - set channelinfo [dict get [r ACL getuser default] channels] - assert_equal "&*" $channelinfo - set channelinfo [dict get [r ACL getuser alice] channels] - assert_equal "" $channelinfo - } - - test {Update acl-pubsub-default, existing users shouldn't get affected} { - set channelinfo [dict get [r ACL getuser default] channels] - assert_equal "&*" $channelinfo - r CONFIG set acl-pubsub-default allchannels - r ACL setuser mydefault - set channelinfo [dict get [r ACL getuser mydefault] channels] - assert_equal "&*" $channelinfo - r CONFIG set acl-pubsub-default resetchannels - set channelinfo [dict get [r ACL getuser mydefault] channels] - assert_equal "&*" $channelinfo - } - - test {Single channel is valid} { - r ACL setuser onechannel &test - set channelinfo [dict get [r ACL getuser onechannel] channels] - assert_equal "&test" $channelinfo - r ACL deluser onechannel - } - - test {Single channel is not valid with allchannels} { - r CONFIG set acl-pubsub-default allchannels - catch {r ACL setuser onechannel &test} err - r CONFIG set acl-pubsub-default resetchannels - set err - } {*start with an empty list of channels*} -} - -set server_path [tmpdir "resetchannels.acl"] -set base_path ${server_path}/ -exec cp -f tests/assets/nodefaultuser.acl $base_path -exec cp -f tests/assets/default.conf $server_path -set acl_file ${base_path}nodefaultuser.acl -start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "resetchannels" "aclfile" $acl_file] tags [list "external:skip"]] { - - test {Only default user has access to all channels irrespective of flag} { - set channelinfo [dict get [r ACL getuser default] channels] - assert_equal "&*" $channelinfo - set channelinfo [dict get [r ACL getuser alice] channels] - assert_equal "" $channelinfo - } -} - - -start_server {overrides {user "default on nopass ~* +@all"} tags {"external:skip"}} { - test {default: load from config file, without channel permission default user can't access any channels} { - catch {r SUBSCRIBE foo} e - set e - } {*NOPERM*channel*} -} - -start_server {overrides {user "default on nopass ~* &* +@all"} tags {"external:skip"}} { - test {default: load from config file with 
all channels permissions} { - r SUBSCRIBE foo - r PSUBSCRIBE bar* - r UNSUBSCRIBE - r PUNSUBSCRIBE - r PUBLISH hello world - } -} - -set server_path [tmpdir "duplicate.acl"] -set base_path ${server_path}/ -exec cp -f tests/assets/user.acl $base_path -exec cp -f tests/assets/default.conf $server_path -set acl_file ${base_path}user.acl -start_server [list overrides [list "dir" $server_path "aclfile" $acl_file] tags [list "external:skip"]] { - - test {Test loading an ACL file with duplicate users} { - exec cp -f tests/assets/user.acl $base_path - - # Corrupt the ACL file - set corruption "\nuser alice on nopass ~* -@all" - exec echo $corruption >> ${base_path}user.acl - catch {r ACL LOAD} err - assert_match {*Duplicate user 'alice' found*} $err - - # Verify the previous users still exist - # NOTE: A missing user evaluates to an empty - # string. - assert {[r ACL GETUSER alice] != ""} - assert_equal [dict get [r ACL GETUSER alice] commands] "+@all" - assert {[r ACL GETUSER bob] != ""} - assert {[r ACL GETUSER default] != ""} - } - - test {Test loading an ACL file with duplicate default user} { - exec cp -f tests/assets/user.acl $base_path - - # Corrupt the ACL file - set corruption "\nuser default on nopass ~* -@all" - exec echo $corruption >> ${base_path}user.acl - catch {r ACL LOAD} err - assert_match {*Duplicate user 'default' found*} $err - - # Verify the previous users still exist - # NOTE: A missing user evaluates to an empty - # string. - assert {[r ACL GETUSER alice] != ""} - assert_equal [dict get [r ACL GETUSER alice] commands] "+@all" - assert {[r ACL GETUSER bob] != ""} - assert {[r ACL GETUSER default] != ""} - } -} - -# test on local machine is passed -#Because the tcl test was slow and there was a problem with restarting the service, everything was removed -#start_server {overrides {user "default on nopass ~* +@all -flushdb"} tags {acl external:skip}} { -# test {ACL from config file and config rewrite} { -# assert_error {NOPERM *} {r flushdb} -# r config rewrite -# restart_server 0 true false -# assert_error {NOPERM *} {r flushdb} -# } -#} diff --git a/tests/unit/aofrw.tcl b/tests/unit/aofrw.tcl deleted file mode 100644 index a2d74168f..000000000 --- a/tests/unit/aofrw.tcl +++ /dev/null @@ -1,210 +0,0 @@ -start_server {tags {"aofrw"}} { - # Enable the AOF - r config set appendonly yes - r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite. - waitForBgrewriteaof r - - test {AOF rewrite during write load} { - # Start a write load for 10 seconds - set master [srv 0 client] - set master_host [srv 0 host] - set master_port [srv 0 port] - set load_handle0 [start_write_load $master_host $master_port 10] - set load_handle1 [start_write_load $master_host $master_port 10] - set load_handle2 [start_write_load $master_host $master_port 10] - set load_handle3 [start_write_load $master_host $master_port 10] - set load_handle4 [start_write_load $master_host $master_port 10] - - # Make sure the instance is really receiving data - wait_for_condition 50 100 { - [r dbsize] > 0 - } else { - fail "No write load detected." - } - - # After 3 seconds, start a rewrite, while the write load is still - # active. - after 3000 - r bgrewriteaof - waitForBgrewriteaof r - - # Let it run a bit more so that we'll append some data to the new - # AOF. 
- after 1000 - - # Stop the processes generating the load if they are still active - stop_write_load $load_handle0 - stop_write_load $load_handle1 - stop_write_load $load_handle2 - stop_write_load $load_handle3 - stop_write_load $load_handle4 - - # Make sure that we remain the only connected client. - # This step is needed to make sure there are no pending writes - # that will be processed between the two "debug digest" calls. - wait_for_condition 50 100 { - [llength [split [string trim [r client list]] "\n"]] == 1 - } else { - puts [r client list] - fail "Clients generating loads are not disconnecting" - } - - # Get the data set digest - set d1 [r debug digest] - - # Load the AOF - r debug loadaof - set d2 [r debug digest] - - # Make sure they are the same - assert {$d1 eq $d2} - } -} - -start_server {tags {"aofrw"}} { - test {Turning off AOF kills the background writing child if any} { - r config set appendonly yes - waitForBgrewriteaof r - r multi - r bgrewriteaof - r config set appendonly no - r exec - wait_for_condition 50 100 { - [string match {*Killing*AOF*child*} [exec tail -n5 < [srv 0 stdout]]] - } else { - fail "Can't find 'Killing AOF child' into recent logs" - } - } - - foreach d {string int} { - foreach e {ziplist linkedlist} { - test "AOF rewrite of list with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r lpush key $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {intset hashtable} { - test "AOF rewrite of set with $e encoding, $d data" { - r flushall - if {$e eq {intset}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r sadd key $data - } - if {$d ne {string}} { - assert_equal [r object encoding key] $e - } - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {ziplist hashtable} { - test "AOF rewrite of hash with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r hset key $data $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {ziplist skiplist} { - test "AOF rewrite of zset with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r zadd key [expr rand()] $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - 
error "assertion:$d1 is not equal to $d2" - } - } - } - } - - test {BGREWRITEAOF is delayed if BGSAVE is in progress} { - r multi - r bgsave - r bgrewriteaof - r info persistence - set res [r exec] - assert_match {*scheduled*} [lindex $res 1] - assert_match {*aof_rewrite_scheduled:1*} [lindex $res 2] - while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { - after 100 - } - } - - test {BGREWRITEAOF is refused if already in progress} { - catch { - r multi - r bgrewriteaof - r bgrewriteaof - r exec - } e - assert_match {*ERR*already*} $e - while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { - after 100 - } - } -} diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl deleted file mode 100644 index 0ec35985a..000000000 --- a/tests/unit/auth.tcl +++ /dev/null @@ -1,43 +0,0 @@ -start_server {tags {"auth"}} { - test {AUTH fails if there is no password configured server side} { - catch {r auth foo} err - set _ $err - } {ERR*no password*} -} - -start_server {tags {"auth"} overrides {requirepass foobar}} { -# test {AUTH fails when a wrong password is given} { -# catch {r auth wrong!} err -# set _ $err -# } {ERR*invalid password} - -# test {AUTH succeeds when the right password is given} { -# r auth foobar -# } {OK} -# -# test {Once AUTH succeeded we can actually send commands to the server} { -# r set foo 100 -# r incr foo -# } {101} -} - -start_server {tags {"auth"} overrides {userpass foobar}} { -# test {AUTH fails when a wrong password is given} { -# catch {r auth wrong!} err -# set _ $err -# } {ERR*invalid password} -# -# test {Arbitrary command gives an error when AUTH is required} { -# catch {r set foo bar} err -# set _ $err -# } {ERR*NOAUTH*} - -# test {AUTH succeeds when the right password is given} { -# r auth foobar -# } {OK} -# -# test {Once AUTH succeeded we can actually send commands to the server} { -# r set foo 100 -# r incr foo -# } {101} -} diff --git a/tests/unit/basic.tcl b/tests/unit/basic.tcl deleted file mode 100644 index 6988e46a2..000000000 --- a/tests/unit/basic.tcl +++ /dev/null @@ -1,789 +0,0 @@ -start_server {tags {"basic"}} { - test {DEL all keys to start with a clean DB} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - - test {SET and GET an item} { - r set x foobar - r get x - } {foobar} - - test {SET and GET an empty item} { - r set x {} - r get x - } {} - - test {DEL against a single item} { - r del x - r get x - } {} - - test {Vararg DEL} { - r set foo1 a - r set foo2 b - r set foo3 c - list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3] - } {3 {{} {} {}}} - - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - lsort [r keys foo*] - } {foo_a foo_b foo_c} - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {DBSIZE} { - r info keyspace 1 - after 1000 - r dbsize - } {6} - - test {DEL all keys} { - foreach key [r keys *] {r del $key} - r info keyspace 1 - after 1000 - r dbsize - } {0} - -# test {Very big payload in GET/SET} { -# set buf [string repeat "abcd" 1000000] -# r set foo $buf -# r get foo -# } [string repeat "abcd" 1000000] - -# tags {"slow"} { -# test {Very big payload random access} { -# set err {} -# array set payload {} -# for {set j 0} {$j < 100} {incr j} { -# set size [expr 1+[randomInt 100000]] -# set buf [string repeat "pl-$j" $size] -# set payload($j) $buf -# r set bigpayload_$j $buf -# } -# for {set j 0} {$j < 1000} {incr j} { -# set index [randomInt 100] -# set buf [r get 
bigpayload_$index] -# if {$buf != $payload($index)} { -# set err "Values differ: I set '$payload($index)' but I read back '$buf'" -# break -# } -# } -# unset payload -# set _ $err -# } {} -# -# test {SET 10000 numeric keys and access all them in reverse order} { -# set err {} -# for {set x 0} {$x < 10000} {incr x} { -# r set $x $x -# } -# set sum 0 -# for {set x 9999} {$x >= 0} {incr x -1} { -# set val [r get $x] -# if {$val ne $x} { -# set err "Element at position $x is $val instead of $x" -# break -# } -# } -# set _ $err -# } {} - -# test {DBSIZE should be 10101 now} { -# r info keyspace 1 -# after 1000 -# r dbsize -# } {10101} -# } - - test {INCR against non existing key} { - set res {} - append res [r incr novar] - append res [r get novar] - } {11} - - test {INCR against key created by incr itself} { - r incr novar - } {2} - - test {INCR against key originally set with SET} { - r set novar 100 - r incr novar - } {101} - - test {INCR over 32bit value} { - r set novar 17179869184 - r incr novar - } {17179869185} - - test {INCRBY over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrby novar 17179869184 - } {34359738368} - -# test {INCR fails against key with spaces (left)} { -# r set novar " 11" -# catch {r incr novar} err -# format $err -# } {ERR*} - - test {INCR fails against key with spaces (right)} { - r set novar "11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (both)} { - r set novar " 11 " - catch {r incr novar} err - format $err - } {ERR*} - -# test {INCR fails against a key holding a list} { -# r rpush mylist 1 -# catch {r incr mylist} err -# r rpop mylist -# format $err -# } {WRONGTYPE*} - - test {DECRBY over 32bit value with over 32bit increment, negative res} { - r set novar 17179869184 - r decrby novar 17179869185 - } {-1} - - test {INCRBYFLOAT against non existing key} { - r del novar - list [roundFloat [r incrbyfloat novar 1]] \ - [roundFloat [r get novar]] \ - [roundFloat [r incrbyfloat novar 0.25]] \ - [roundFloat [r get novar]] - } {1 1 1.25 1.25} - - test {INCRBYFLOAT against key originally set with SET} { - r set novar 1.5 - roundFloat [r incrbyfloat novar 1.5] - } {3} - - test {INCRBYFLOAT over 32bit value} { - r set novar 17179869184 - r incrbyfloat novar 1.5 - } {17179869185.5} - - test {INCRBYFLOAT over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrbyfloat novar 17179869184 - } {34359738368} - - test {INCRBYFLOAT fails against key with spaces (left)} { - set err {} - r set novar " 11" - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (right)} { - set err {} - r set novar "11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (both)} { - set err {} - r set novar " 11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - -# test {INCRBYFLOAT fails against a key holding a list} { -# r del mylist -# set err {} -# r rpush mylist 1 -# catch {r incrbyfloat mylist 1.0} err -# r del mylist -# format $err -# } {WRONGTYPE*} - - test {INCRBYFLOAT does not allow NaN or Infinity} { - r set foo 0 - set err {} - catch {r incrbyfloat foo +inf} err - set err - # p.s. no way I can force NaN to test it from the API because - # there is no way to increment / decrement by infinity nor to - # perform divisions. 
- } {ERR*would produce*} - - test {INCRBYFLOAT decrement} { - r set foo 1 - roundFloat [r incrbyfloat foo -1.1] - } {-0.1} - - test "SETNX target key missing" { - r del novar - assert_equal 1 [r setnx novar foobared] - assert_equal "foobared" [r get novar] - } - - test "SETNX target key exists" { - r set novar foobared - assert_equal 0 [r setnx novar blabla] - assert_equal "foobared" [r get novar] - } - - test "SETNX against not-expired volatile key" { - r set x 10 - r expire x 10000 - assert_equal 0 [r setnx x 20] - assert_equal 10 [r get x] - } - - test "SETNX against expired volatile key" { - # Make it very unlikely for the key this test uses to be expired by the - # active expiry cycle. This is tightly coupled to the implementation of - # active expiry and dbAdd() but currently the only way to test that - # SETNX expires a key when it should have been. - for {set x 0} {$x < 9999} {incr x} { - r setex key-$x 3600 value - } - - # This will be one of 10000 expiring keys. A cycle is executed every - # 100ms, sampling 10 keys for being expired or not. This key will be - # expired for at most 1s when we wait 2s, resulting in a total sample - # of 100 keys. The probability of the success of this test being a - # false positive is therefore approx. 1%. - r set x 10 - r expire x 1 - - # Wait for the key to expire - after 2000 - - assert_equal 1 [r setnx x 20] - assert_equal 20 [r get x] - } - -# test "DEL against expired key" { -# r debug set-active-expire 0 -# r setex keyExpire 1 valExpire -# after 1100 -# assert_equal 0 [r del keyExpire] -# r debug set-active-expire 1 -# } - - test {EXISTS} { - set res {} - r set newkey test - append res [r exists newkey] - r del newkey - append res [r exists newkey] - } {10} - - test {Zero length value in key. SET/GET/EXISTS} { - r set emptykey {} - set res [r get emptykey] - append res [r exists emptykey] - r del emptykey - append res [r exists emptykey] - } {10} - - test {Commands pipelining} { - set fd [r channel] - puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" - flush $fd - set res {} - append res [string match OK* [r read]] - append res [r read] - append res [string match PONG* [r read]] - format $res - } {1xyzk1} - - test {Non existing command} { - catch {r foobaredcommand} err - string match ERR* $err - } {1} - -# test {RENAME basic usage} { -# r set mykey hello -# r rename mykey mykey1 -# r rename mykey1 mykey2 -# r get mykey2 -# } {hello} - -# test {RENAME source key should no longer exist} { -# r exists mykey -# } {0} - -# test {RENAME against already existing key} { -# r set mykey a -# r set mykey2 b -# r rename mykey2 mykey -# set res [r get mykey] -# append res [r exists mykey2] -# } {b0} - -# test {RENAMENX basic usage} { -# r del mykey -# r del mykey2 -# r set mykey foobar -# r renamenx mykey mykey2 -# set res [r get mykey2] -# append res [r exists mykey] -# } {foobar0} -# -# test {RENAMENX against already existing key} { -# r set mykey foo -# r set mykey2 bar -# r renamenx mykey mykey2 -# } {0} -# -# test {RENAMENX against already existing key (2)} { -# set res [r get mykey] -# append res [r get mykey2] -# } {foobar} -# -# test {RENAME against non existing source key} { -# catch {r rename nokey foobar} err -# format $err -# } {ERR*} -# -# test {RENAME where source and dest key is the same} { -# catch {r rename mykey mykey} err -# format $err -# } {ERR*} -# -# test {RENAME with volatile key, should move the TTL as well} { -# r del mykey mykey2 -# r set mykey foo -# r expire mykey 100 -# assert {[r ttl mykey] > 95 && [r ttl mykey] <= 
100} -# r rename mykey mykey2 -# assert {[r ttl mykey2] > 95 && [r ttl mykey2] <= 100} -# } -# -# test {RENAME with volatile key, should not inherit TTL of target key} { -# r del mykey mykey2 -# r set mykey foo -# r set mykey2 bar -# r expire mykey2 100 -# assert {[r ttl mykey] == -1 && [r ttl mykey2] > 0} -# r rename mykey mykey2 -# r ttl mykey2 -# } {-1} - -# test {DEL all keys again (DB 0)} { -# foreach key [r keys *] { -# r del $key -# } -# r dbsize -# } {0} - -# test {DEL all keys again (DB 1)} { -# r select 10 -# foreach key [r keys *] { -# r del $key -# } -# set res [r dbsize] -# r select 9 -# format $res -# } {0} - -# test {MOVE basic usage} { -# r set mykey foobar -# r move mykey 10 -# set res {} -# lappend res [r exists mykey] -# lappend res [r dbsize] -# r select 10 -# lappend res [r get mykey] -# lappend res [r dbsize] -# r select 9 -# format $res -# } [list 0 0 foobar 1] - -# test {MOVE against key existing in the target DB} { -# r set mykey hello -# r move mykey 10 -# } {0} - -# test {MOVE against non-integer DB (#1428)} { -# r set mykey hello -# catch {r move mykey notanumber} e -# set e -# } {*ERR*index out of range} - -# test {SET/GET keys in different DBs} { -# r set a hello -# r set b world -# r select 10 -# r set a foo -# r set b bared -# r select 9 -# set res {} -# lappend res [r get a] -# lappend res [r get b] -# r select 10 -# lappend res [r get a] -# lappend res [r get b] -# r select 9 -# format $res -# } {hello world foo bared} - -# test {MGET} { -# r flushdb -# r set foo BAR -# r set bar FOO -# r mget foo bar -# } {BAR FOO} - -# test {MGET against non existing key} { -# r mget foo baazz bar -# } {BAR {} FOO} -# -# test {MGET against non-string key} { -# r sadd myset ciao -# r sadd myset bau -# r mget foo baazz bar myset -# } {BAR {} FOO {}} - -# test {RANDOMKEY} { -# r flushdb -# r set foo x -# r set bar y -# set foo_seen 0 -# set bar_seen 0 -# for {set i 0} {$i < 100} {incr i} { -# set rkey [r randomkey] -# if {$rkey eq {foo}} { -# set foo_seen 1 -# } -# if {$rkey eq {bar}} { -# set bar_seen 1 -# } -# } -# list $foo_seen $bar_seen -# } {1 1} -# -# test {RANDOMKEY against empty DB} { -# r flushdb -# r randomkey -# } {} -# -# test {RANDOMKEY regression 1} { -# r flushdb -# r set x 10 -# r del x -# r randomkey -# } {} - -# test {GETSET (set new value)} { -# list [r getset foo xyz] [r get foo] -# } {{} xyz} - - test {GETSET (replace old value)} { - r set foo bar - list [r getset foo xyz] [r get foo] - } {bar xyz} - - test {MSET base case} { - r mset x 10 y "foo bar" z "x x x x x x x\n\n\r\n" - r mget x y z - } [list 10 {foo bar} "x x x x x x x\n\n\r\n"] - - test {MSET wrong number of args} { - catch {r mset x 10 y "foo bar" z} err - format $err - } {*wrong number*} - - test {MSETNX with already existent key} { - list [r msetnx x1 xxx y2 yyy x 20] [r exists x1] [r exists y2] - } {0 0 0} - - test {MSETNX with not existing keys} { - list [r msetnx x1 xxx y2 yyy] [r get x1] [r get y2] - } {1 xxx yyy} - - test "STRLEN against non-existing key" { - assert_equal 0 [r strlen notakey] - } - - test "STRLEN against integer-encoded value" { - r set myinteger -555 - assert_equal 4 [r strlen myinteger] - } - - test "STRLEN against plain string" { - r set mystring "foozzz0123456789 baz" - assert_equal 20 [r strlen mystring] - } - - test "SETBIT against non-existing key" { - r del mykey - assert_equal 0 [r setbit mykey 1 1] - assert_equal [binary format B* 01000000] [r get mykey] - } - - test "SETBIT against string-encoded key" { - # Ascii "@" is integer 64 = 01 00 00 00 - r set 
mykey "@" - - assert_equal 0 [r setbit mykey 2 1] - assert_equal [binary format B* 01100000] [r get mykey] - assert_equal 1 [r setbit mykey 1 0] - assert_equal [binary format B* 00100000] [r get mykey] - } - -# test "SETBIT against integer-encoded key" { -# # Ascii "1" is integer 49 = 00 11 00 01 -# r set mykey 1 -# assert_encoding int mykey -# -# assert_equal 0 [r setbit mykey 6 1] -# assert_equal [binary format B* 00110011] [r get mykey] -# assert_equal 1 [r setbit mykey 2 0] -# assert_equal [binary format B* 00010011] [r get mykey] -# } - -# test "SETBIT against key with wrong type" { -# r del mykey -# r lpush mykey "foo" -# assert_error "WRONGTYPE*" {r setbit mykey 0 1} -# } - - test "SETBIT with out of range bit offset" { - r del mykey - assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1} - assert_error "*out of range*" {r setbit mykey -1 1} - } - - test "SETBIT with non-bit argument" { - r del mykey - assert_error "*out of range*" {r setbit mykey 0 -1} - assert_error "*out of range*" {r setbit mykey 0 2} - assert_error "*out of range*" {r setbit mykey 0 10} - assert_error "*out of range*" {r setbit mykey 0 20} - } - -# test "SETBIT fuzzing" { -# set str "" -# set len [expr 256*8] -# r del mykey -# -# for {set i 0} {$i < 2000} {incr i} { -# set bitnum [randomInt $len] -# set bitval [randomInt 2] -# set fmt [format "%%-%ds%%d%%-s" $bitnum] -# set head [string range $str 0 $bitnum-1] -# set tail [string range $str $bitnum+1 end] -# set str [string map {" " 0} [format $fmt $head $bitval $tail]] -# -# r setbit mykey $bitnum $bitval -# assert_equal [binary format B* $str] [r get mykey] -# } -# } - - test "GETBIT against non-existing key" { - r del mykey - assert_equal 0 [r getbit mykey 0] - } - - test "GETBIT against string-encoded key" { - # Single byte with 2nd and 3rd bit set - r set mykey "`" - - # In-range - assert_equal 0 [r getbit mykey 0] - assert_equal 1 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 0 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - -# test "GETBIT against integer-encoded key" { -# r set mykey 1 -# assert_encoding int mykey -# -# # Ascii "1" is integer 49 = 00 11 00 01 -# assert_equal 0 [r getbit mykey 0] -# assert_equal 0 [r getbit mykey 1] -# assert_equal 1 [r getbit mykey 2] -# assert_equal 1 [r getbit mykey 3] -# -# # Out-range -# assert_equal 0 [r getbit mykey 8] -# assert_equal 0 [r getbit mykey 100] -# assert_equal 0 [r getbit mykey 10000] -# } -# -# test "SETRANGE against non-existing key" { -# r del mykey -# assert_equal 3 [r setrange mykey 0 foo] -# assert_equal "foo" [r get mykey] -# -# r del mykey -# assert_equal 0 [r setrange mykey 0 ""] -# assert_equal 0 [r exists mykey] -# -# r del mykey -# assert_equal 4 [r setrange mykey 1 foo] -# assert_equal "\000foo" [r get mykey] -# } - - test "SETRANGE against string-encoded key" { - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 b] - assert_equal "boo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 ""] - assert_equal "foo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 1 b] - assert_equal "fbo" [r get mykey] - - r set mykey "foo" - assert_equal 7 [r setrange mykey 4 bar] - assert_equal "foo\000bar" [r get mykey] - } - -# test "SETRANGE against integer-encoded key" { -# r set mykey 1234 -# assert_encoding int mykey -# assert_equal 4 [r setrange mykey 0 2] -# assert_encoding raw mykey -# assert_equal 2234 
[r get mykey] -# -# # Shouldn't change encoding when nothing is set -# r set mykey 1234 -# assert_encoding int mykey -# assert_equal 4 [r setrange mykey 0 ""] -# assert_encoding int mykey -# assert_equal 1234 [r get mykey] -# -# r set mykey 1234 -# assert_encoding int mykey -# assert_equal 4 [r setrange mykey 1 3] -# assert_encoding raw mykey -# assert_equal 1334 [r get mykey] -# -# r set mykey 1234 -# assert_encoding int mykey -# assert_equal 6 [r setrange mykey 5 2] -# assert_encoding raw mykey -# assert_equal "1234\0002" [r get mykey] -# } - -# test "SETRANGE against key with wrong type" { -# r del mykey -# r lpush mykey "foo" -# assert_error "WRONGTYPE*" {r setrange mykey 0 bar} -# } - -# test "SETRANGE with out of range offset" { -# r del mykey -# assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} -# -# r set mykey "hello" -# assert_error "*out of range*" {r setrange mykey -1 world} -# assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} -# } - - test "GETRANGE against non-existing key" { - r del mykey - assert_equal "" [r getrange mykey 0 -1] - } - - test "GETRANGE against string value" { - r set mykey "Hello World" - assert_equal "Hell" [r getrange mykey 0 3] - assert_equal "Hello World" [r getrange mykey 0 -1] - assert_equal "orld" [r getrange mykey -4 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal " World" [r getrange mykey 5 5000] - assert_equal "Hello World" [r getrange mykey -5000 10000] - } - - test "GETRANGE against integer-encoded value" { - r set mykey 1234 - assert_equal "123" [r getrange mykey 0 2] - assert_equal "1234" [r getrange mykey 0 -1] - assert_equal "234" [r getrange mykey -3 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal "4" [r getrange mykey 3 5000] - assert_equal "1234" [r getrange mykey -5000 10000] - } - -# test "GETRANGE fuzzing" { -# for {set i 0} {$i < 1000} {incr i} { -# r set bin [set bin [randstring 0 1024 binary]] -# set _start [set start [randomInt 1500]] -# set _end [set end [randomInt 1500]] -# if {$_start < 0} {set _start "end-[abs($_start)-1]"} -# if {$_end < 0} {set _end "end-[abs($_end)-1]"} -# assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] -# } -# } - - test {Extended SET can detect syntax errors} { - set e {} - catch {r set foo bar non-existing-option} e - set e - } {*syntax*} - - test {Extended SET NX option} { - r del foo - set v1 [r set foo 1 nx] - set v2 [r set foo 2 nx] - list $v1 $v2 [r get foo] - } {OK {} 1} - - test {Extended SET XX option} { - r del foo - set v1 [r set foo 1 xx] - r set foo bar - set v2 [r set foo 2 xx] - list $v1 $v2 [r get foo] - } {{} OK 2} - - test {Extended SET EX option} { - r del foo - r set foo bar ex 10 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET PX option} { - r del foo - r set foo bar px 10000 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET using multiple options at once} { - r set foo val - assert {[r set foo bar xx px 10000] eq {OK}} - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - -# test {KEYS * two times with long key, Github issue #1208} { -# r flushdb -# r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test -# r keys * -# r keys * -# } {dlskeriewrioeuwqoirueioqwrueoqwrueqw} - - test {GETRANGE with huge ranges, Github issue #1844} { - r set foo bar - r getrange foo 0 4294967297 - } {bar} -} diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl deleted file mode 100644 index 6ddae9170..000000000 
--- a/tests/unit/bitops.tcl +++ /dev/null @@ -1,341 +0,0 @@ -# Compare Redis commadns against Tcl implementations of the same commands. -proc count_bits s { - binary scan $s b* bits - string length [regsub -all {0} $bits {}] -} - -proc simulate_bit_op {op args} { - set maxlen 0 - set j 0 - set count [llength $args] - foreach a $args { - binary scan $a b* bits - set b($j) $bits - if {[string length $bits] > $maxlen} { - set maxlen [string length $bits] - } - incr j - } - for {set j 0} {$j < $count} {incr j} { - if {[string length $b($j)] < $maxlen} { - append b($j) [string repeat 0 [expr $maxlen-[string length $b($j)]]] - } - } - set out {} - for {set x 0} {$x < $maxlen} {incr x} { - set bit [string range $b(0) $x $x] - if {$op eq {not}} {set bit [expr {!$bit}]} - for {set j 1} {$j < $count} {incr j} { - set bit2 [string range $b($j) $x $x] - switch $op { - and {set bit [expr {$bit & $bit2}]} - or {set bit [expr {$bit | $bit2}]} - xor {set bit [expr {$bit ^ $bit2}]} - } - } - append out $bit - } - binary format b* $out -} - -start_server {tags {"bitops"}} { - test {BITCOUNT returns 0 against non existing key} { - r bitcount no-key - } 0 - -# catch {unset num} -# foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] { -# incr num -# test "BITCOUNT against test vector #$num" { -# r set str $vec -# assert {[r bitcount str] == [count_bits $vec]} -# } -# } - -# test {BITCOUNT fuzzing without start/end} { -# for {set j 0} {$j < 100} {incr j} { -# set str [randstring 0 3000] -# r set str $str -# assert {[r bitcount str] == [count_bits $str]} -# } -# } - -# test {BITCOUNT fuzzing with start/end} { -# for {set j 0} {$j < 100} {incr j} { -# set str [randstring 0 3000] -# r set str $str -# set l [string length $str] -# set start [randomInt $l] -# set end [randomInt $l] -# if {$start > $end} { -# lassign [list $end $start] start end -# } -# assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]} -# } -# } - - test {BITCOUNT with start, end} { - r set s "foobar" - assert_equal [r bitcount s 0 -1] [count_bits "foobar"] - assert_equal [r bitcount s 1 -2] [count_bits "ooba"] - assert_equal [r bitcount s -2 1] [count_bits ""] - assert_equal [r bitcount s 0 1000] [count_bits "foobar"] - } - - test {BITCOUNT syntax error #1} { - catch {r bitcount s 0} e - set e - } {ERR*syntax*} - - test {BITCOUNT regression test for github issue #582} { - r del str - r setbit foo 0 1 - if {[catch {r bitcount foo 0 4294967296} e]} { - assert_match {*ERR*out of range*} $e - set _ 1 - } else { - set e - } - } {1} - - test {BITCOUNT misaligned prefix} { - r del str - r set str ab - r bitcount str 1 -1 - } {3} - - test {BITCOUNT misaligned prefix + full words + remainder} { - r del str - r set str __PPxxxxxxxxxxxxxxxxRR__ - r bitcount str 2 -3 - } {74} - - test {BITOP NOT (empty string)} { - r set s "" - r bitop not dest s - r get dest - } {} - - test {BITOP NOT (known string)} { - r set s "\xaa\x00\xff\x55" - r bitop not dest s - r get dest - } "\x55\xff\x00\xaa" - - test {BITOP where dest and target are the same key} { - r set s "\xaa\x00\xff\x55" - r bitop not s s - r get s - } "\x55\xff\x00\xaa" - - test {BITOP AND|OR|XOR don't change the string with single input key} { - r set a "\x01\x02\xff" - r bitop and res1 a - r bitop or res2 a - r bitop xor res3 a - list [r get res1] [r get res2] [r get res3] - } [list "\x01\x02\xff" "\x01\x02\xff" "\x01\x02\xff"] - - test {BITOP missing key is considered a stream of zero} { - r set a "\x01\x02\xff" - r bitop and res1 no-suck-key a - r bitop or res2 
no-suck-key a no-such-key - r bitop xor res3 no-such-key a - list [r get res1] [r get res2] [r get res3] - } [list "\x00\x00\x00" "\x01\x02\xff" "\x01\x02\xff"] - - test {BITOP shorter keys are zero-padded to the key with max length} { - r set a "\x01\x02\xff\xff" - r set b "\x01\x02\xff" - r bitop and res1 a b - r bitop or res2 a b - r bitop xor res3 a b - list [r get res1] [r get res2] [r get res3] - } [list "\x01\x02\xff\x00" "\x01\x02\xff\xff" "\x00\x00\x00\xff"] - - foreach op {and or xor} { - test "BITOP $op fuzzing" { - for {set i 0} {$i < 10} {incr i} { - r flushall - set vec {} - set veckeys {} - set numvec [expr {[randomInt 10]+1}] - for {set j 0} {$j < $numvec} {incr j} { - set str [randstring 0 1000] - lappend vec $str - lappend veckeys vector_$j - r set vector_$j $str - } - r bitop $op target {*}$veckeys - assert_equal [r get target] [simulate_bit_op $op {*}$vec] - } - } - } - - test {BITOP NOT fuzzing} { - for {set i 0} {$i < 10} {incr i} { - r flushall - set str [randstring 0 1000] - r set str $str - r bitop not target str - assert_equal [r get target] [simulate_bit_op not $str] - } - } - - test {BITOP with integer encoded source objects} { - r set a 1 - r set b 2 - r bitop xor dest a b a - r get dest - } {2} - -# test {BITOP with non string source key} { -# r del c -# r set a 1 -# r set b 2 -# r lpush c foo -# catch {r bitop xor dest a b c d} e -# set e -# } {WRONGTYPE*} - - test {BITOP with empty string after non empty string (issue #529)} { - r flushdb - r set a "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - r bitop or x a b - } {32} - -# test {BITPOS bit=0 with empty key returns 0} { -# r del str -# r bitpos str 0 -# } {0} - -# test {BITPOS bit=1 with empty key returns -1} { -# r del str -# r bitpos str 1 -# } {-1} -# -# test {BITPOS bit=0 with string less than 1 word works} { -# r set str "\xff\xf0\x00" -# r bitpos str 0 -# } {12} -# -# test {BITPOS bit=1 with string less than 1 word works} { -# r set str "\x00\x0f\x00" -# r bitpos str 1 -# } {12} -# -# test {BITPOS bit=0 starting at unaligned address} { -# r set str "\xff\xf0\x00" -# r bitpos str 0 1 -# } {12} -# -# test {BITPOS bit=1 starting at unaligned address} { -# r set str "\x00\x0f\xff" -# r bitpos str 1 1 -# } {12} -# -# test {BITPOS bit=0 unaligned+full word+reminder} { -# r del str -# r set str "\xff\xff\xff" ; # Prefix -# # Followed by two (or four in 32 bit systems) full words -# r append str "\xff\xff\xff\xff\xff\xff\xff\xff" -# r append str "\xff\xff\xff\xff\xff\xff\xff\xff" -# r append str "\xff\xff\xff\xff\xff\xff\xff\xff" -# # First zero bit. -# r append str "\x0f" -# assert {[r bitpos str 0] == 216} -# assert {[r bitpos str 0 1] == 216} -# assert {[r bitpos str 0 2] == 216} -# assert {[r bitpos str 0 3] == 216} -# assert {[r bitpos str 0 4] == 216} -# assert {[r bitpos str 0 5] == 216} -# assert {[r bitpos str 0 6] == 216} -# assert {[r bitpos str 0 7] == 216} -# assert {[r bitpos str 0 8] == 216} -# } -# -# test {BITPOS bit=1 unaligned+full word+reminder} { -# r del str -# r set str "\x00\x00\x00" ; # Prefix -# # Followed by two (or four in 32 bit systems) full words -# r append str "\x00\x00\x00\x00\x00\x00\x00\x00" -# r append str "\x00\x00\x00\x00\x00\x00\x00\x00" -# r append str "\x00\x00\x00\x00\x00\x00\x00\x00" -# # First zero bit. 
-# r append str "\xf0" -# assert {[r bitpos str 1] == 216} -# assert {[r bitpos str 1 1] == 216} -# assert {[r bitpos str 1 2] == 216} -# assert {[r bitpos str 1 3] == 216} -# assert {[r bitpos str 1 4] == 216} -# assert {[r bitpos str 1 5] == 216} -# assert {[r bitpos str 1 6] == 216} -# assert {[r bitpos str 1 7] == 216} -# assert {[r bitpos str 1 8] == 216} -# } -# -# test {BITPOS bit=1 returns -1 if string is all 0 bits} { -# r set str "" -# for {set j 0} {$j < 20} {incr j} { -# assert {[r bitpos str 1] == -1} -# r append str "\x00" -# } -# } -# -# test {BITPOS bit=0 works with intervals} { -# r set str "\x00\xff\x00" -# assert {[r bitpos str 0 0 -1] == 0} -# assert {[r bitpos str 0 1 -1] == 16} -# assert {[r bitpos str 0 2 -1] == 16} -# assert {[r bitpos str 0 2 200] == 16} -# assert {[r bitpos str 0 1 1] == -1} -# } -# -# test {BITPOS bit=1 works with intervals} { -# r set str "\x00\xff\x00" -# assert {[r bitpos str 1 0 -1] == 8} -# assert {[r bitpos str 1 1 -1] == 8} -# assert {[r bitpos str 1 2 -1] == -1} -# assert {[r bitpos str 1 2 200] == -1} -# assert {[r bitpos str 1 1 1] == 8} -# } -# -# test {BITPOS bit=0 changes behavior if end is given} { -# r set str "\xff\xff\xff" -# assert {[r bitpos str 0] == 24} -# assert {[r bitpos str 0 0] == 24} -# assert {[r bitpos str 0 0 -1] == -1} -# } -# -# test {BITPOS bit=1 fuzzy testing using SETBIT} { -# r del str -# set max 524288; # 64k -# set first_one_pos -1 -# for {set j 0} {$j < 1000} {incr j} { -# assert {[r bitpos str 1] == $first_one_pos} -# set pos [randomInt $max] -# r setbit str $pos 1 -# if {$first_one_pos == -1 || $first_one_pos > $pos} { -# # Update the position of the first 1 bit in the array -# # if the bit we set is on the left of the previous one. -# set first_one_pos $pos -# } -# } -# } -# -# test {BITPOS bit=0 fuzzy testing using SETBIT} { -# set max 524288; # 64k -# set first_zero_pos $max -# r set str [string repeat "\xff" [expr $max/8]] -# for {set j 0} {$j < 1000} {incr j} { -# assert {[r bitpos str 0] == $first_zero_pos} -# set pos [randomInt $max] -# r setbit str $pos 0 -# if {$first_zero_pos > $pos} { -# # Update the position of the first 0 bit in the array -# # if the bit we clear is on the left of the previous one. -# set first_zero_pos $pos -# } -# } -# } -} diff --git a/tests/unit/command.tcl b/tests/unit/command.tcl deleted file mode 100644 index a647b42b7..000000000 --- a/tests/unit/command.tcl +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. - -start_server {tags {"command"}} { - test "Command docs supported." 
{ - set doc [r command docs set] - # puts $doc - assert [dict exists $doc set] - } -} diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl deleted file mode 100644 index b79c3ba9d..000000000 --- a/tests/unit/dump.tcl +++ /dev/null @@ -1,142 +0,0 @@ -start_server {tags {"dump"}} { - test {DUMP / RESTORE are able to serialize / unserialize a simple key} { - r set foo bar - set encoded [r dump foo] - r del foo - list [r exists foo] [r restore foo 0 $encoded] [r ttl foo] [r get foo] - } {0 OK -1 bar} - - test {RESTORE can set an arbitrary expire to the materialized key} { - r set foo bar - set encoded [r dump foo] - r del foo - r restore foo 5000 $encoded - set ttl [r pttl foo] - assert {$ttl >= 3000 && $ttl <= 5000} - r get foo - } {bar} - - test {RESTORE can set an expire that overflows a 32 bit integer} { - r set foo bar - set encoded [r dump foo] - r del foo - r restore foo 2569591501 $encoded - set ttl [r pttl foo] - assert {$ttl >= (2569591501-3000) && $ttl <= 2569591501} - r get foo - } {bar} - - test {RESTORE returns an error of the key already exists} { - r set foo bar - set e {} - catch {r restore foo 0 "..."} e - set e - } {*is busy*} - - test {DUMP of non existing key returns nil} { - r dump nonexisting_key - } {} - - test {MIGRATE is able to migrate a key between two instances} { - set first [srv 0 client] - r set key "Some Value" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - set ret [r -1 migrate $second_host $second_port key 9 5000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second get key] eq {Some Value}} - assert {[$second ttl key] == -1} - } - } - - test {MIGRATE propagates TTL correctly} { - set first [srv 0 client] - r set key "Some Value" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - $first expire key 10 - set ret [r -1 migrate $second_host $second_port key 9 5000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second get key] eq {Some Value}} - assert {[$second ttl key] >= 7 && [$second ttl key] <= 10} - } - } - - test {MIGRATE can correctly transfer large values} { - set first [srv 0 client] - r del key - for {set j 0} {$j < 5000} {incr j} { - r rpush key 1 2 3 4 5 6 7 8 9 10 - r rpush key "item 1" "item 2" "item 3" "item 4" "item 5" \ - "item 6" "item 7" "item 8" "item 9" "item 10" - } - assert {[string length [r dump key]] > (1024*64)} - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - set ret [r -1 migrate $second_host $second_port key 9 10000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second ttl key] == -1} - assert {[$second llen key] == 5000*20} - } - } - - test {MIGRATE can correctly transfer hashes} { - set first [srv 0 client] - r del key - r hmset key field1 "item 1" field2 "item 2" field3 "item 3" \ - field4 "item 4" field5 "item 5" field6 "item 6" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert 
{[$second exists key] == 0} - set ret [r -1 migrate $second_host $second_port key 9 10000] - assert {$ret eq {OK}} - assert {[$first exists key] == 0} - assert {[$second exists key] == 1} - assert {[$second ttl key] == -1} - } - } - - test {MIGRATE timeout actually works} { - set first [srv 0 client] - r set key "Some Value" - start_server {tags {"repl"}} { - set second [srv 0 client] - set second_host [srv 0 host] - set second_port [srv 0 port] - - assert {[$first exists key] == 1} - assert {[$second exists key] == 0} - - set rd [redis_deferring_client] - $rd debug sleep 5.0 ; # Make second server unable to reply. - set e {} - catch {r -1 migrate $second_host $second_port key 9 1000} e - assert_match {IOERR*} $e - } - } -} diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl deleted file mode 100644 index e1474def3..000000000 --- a/tests/unit/expire.tcl +++ /dev/null @@ -1,201 +0,0 @@ -start_server {tags {"expire"}} { - test {EXPIRE - set timeouts multiple times} { - r set x foobar - set v1 [r expire x 5] - set v2 [r ttl x] - set v3 [r expire x 10] - set v4 [r ttl x] - r expire x 2 - list $v1 $v2 $v3 $v4 - } {1 [45] 1 10} - - test {EXPIRE - It should be still possible to read 'x'} { - r get x - } {foobar} - -# tags {"slow"} { -# test {EXPIRE - After 2.1 seconds the key should no longer be here} { -# after 2100 -# list [r get x] [r exists x] -# } {{} 0} -# } - - test {EXPIRE - write on expire should work} { - r del x - r lpush x foo - r expire x 1000 - r lpush x bar - r lrange x 0 -1 - } {bar foo} - - test {EXPIREAT - Check for EXPIRE alike behavior} { - r del x - r set x foo - r expireat x [expr [clock seconds]+15] - r ttl x - } {1[345]} - - test {SETEX - Set + Expire combo operation. Check for TTL} { - r setex x 12 test - r ttl x - } {1[012]} - - test {SETEX - Check value} { - r get x - } {test} - - test {SETEX - Overwrite old key} { - r setex y 1 foo - r get y - } {foo} - -# tags {"slow"} { -# test {SETEX - Wait for the key to expire} { -# after 1100 -# r get y -# } {} -# } - - test {SETEX - Wrong time parameter} { - catch {r setex z -10 foo} e - set _ $e - } {*invalid expire*} - - test {PERSIST can undo an EXPIRE} { - r set x foo - r expire x 50 - list [r ttl x] [r persist x] [r ttl x] [r get x] - } {50 1 -1 foo} - - test {PERSIST returns 0 against non existing or non volatile keys} { - r set x foo - list [r persist foo] [r persist nokeyatall] - } {0 0} - - test {EXPIRE pricision is now the millisecond} { - # This test is very likely to do a false positive if the - # server is under pressure, so if it does not work give it a few more - # chances. - for {set j 0} {$j < 3} {incr j} { - r del x - r setex x 1 somevalue - after 900 - set a [r get x] - after 1100 - set b [r get x] - if {$a eq {somevalue} && $b eq {}} break - } - list $a $b - } {somevalue {}} - -# test {PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires} { -# # This test is very likely to do a false positive if the -# # server is under pressure, so if it does not work give it a few more -# # chances. 
-# for {set j 0} {$j < 3} {incr j} { -# r del x y z -# r psetex x 100 somevalue -# after 80 -# set a [r get x] -# after 120 -# set b [r get x] -# -# r set x somevalue -# r pexpire x 100 -# after 80 -# set c [r get x] -# after 120 -# set d [r get x] -# -# r set x somevalue -# r pexpireat x [expr ([clock seconds]*1000)+100] -# after 80 -# set e [r get x] -# after 120 -# set f [r get x] -# -# if {$a eq {somevalue} && $b eq {} && -# $c eq {somevalue} && $d eq {} && -# $e eq {somevalue} && $f eq {}} break -# } -# list $a $b -# } {somevalue {}} - - test {TTL returns tiem to live in seconds} { - r del x - r setex x 10 somevalue - set ttl [r ttl x] - assert {$ttl > 8 && $ttl <= 10} - } - - test {PTTL returns time to live in milliseconds} { - r del x - r setex x 1 somevalue - set ttl [r pttl x] - assert {$ttl > 900 && $ttl <= 1000} - } - - test {TTL / PTTL return -1 if key has no expire} { - r del x - r set x hello - list [r ttl x] [r pttl x] - } {-1 -1} - - test {TTL / PTTL return -2 if key does not exit} { - r del x - list [r ttl x] [r pttl x] - } {-2 -2} - -# test {Redis should actively expire keys incrementally} { -# r flushdb -# r psetex key1 500 a -# r psetex key2 500 a -# r psetex key3 500 a -# set size1 [r dbsize] -# # Redis expires random keys ten times every second so we are -# # fairly sure that all the three keys should be evicted after -# # one second. -# after 1000 -# set size2 [r dbsize] -# list $size1 $size2 -# } {3 0} - -# test {Redis should lazy expire keys} { -# r flushdb -# r debug set-active-expire 0 -# r psetex key1 500 a -# r psetex key2 500 a -# r psetex key3 500 a -# set size1 [r dbsize] -# # Redis expires random keys ten times every second so we are -# # fairly sure that all the three keys should be evicted after -# # one second. -# after 1000 -# set size2 [r dbsize] -# r mget key1 key2 key3 -# set size3 [r dbsize] -# r debug set-active-expire 1 -# list $size1 $size2 $size3 -# } {3 3 0} -# -# test {EXPIRE should not resurrect keys (issue #1026)} { -# r debug set-active-expire 0 -# r set foo bar -# r pexpire foo 500 -# after 1000 -# r expire foo 10 -# r debug set-active-expire 1 -# r exists foo -# } {0} - - test {5 keys in, 5 keys out} { - r flushdb - r set a c - r expire a 5 - r set t c - r set e c - r set s c - r set foo b - lsort [r keys *] - } {a e foo s t} -} diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl deleted file mode 100644 index 7ed871098..000000000 --- a/tests/unit/geo.tcl +++ /dev/null @@ -1,311 +0,0 @@ -# Helper functions to simulate search-in-radius in the Tcl side in order to -# verify the Redis implementation with a fuzzy test. -proc geo_degrad deg {expr {$deg*atan(1)*8/360}} - -proc geo_distance {lon1d lat1d lon2d lat2d} { - set lon1r [geo_degrad $lon1d] - set lat1r [geo_degrad $lat1d] - set lon2r [geo_degrad $lon2d] - set lat2r [geo_degrad $lat2d] - set v [expr {sin(($lon2r - $lon1r) / 2)}] - set u [expr {sin(($lat2r - $lat1r) / 2)}] - expr {2.0 * 6372797.560856 * \ - asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))} -} - -proc geo_random_point {lonvar latvar} { - upvar 1 $lonvar lon - upvar 1 $latvar lat - # Note that the actual latitude limit should be -85 to +85, we restrict - # the test to -70 to +70 since in this range the algorithm is more precise - # while outside this range occasionally some element may be missing. - set lon [expr {-180 + rand()*360}] - set lat [expr {-70 + rand()*140}] -} - -# Return elements non common to both the lists. 
-# This code is from http://wiki.tcl.tk/15489 -proc compare_lists {List1 List2} { - set DiffList {} - foreach Item $List1 { - if {[lsearch -exact $List2 $Item] == -1} { - lappend DiffList $Item - } - } - foreach Item $List2 { - if {[lsearch -exact $List1 $Item] == -1} { - if {[lsearch -exact $DiffList $Item] == -1} { - lappend DiffList $Item - } - } - } - return $DiffList -} - -# The following list represents sets of random seed, search position -# and radius that caused bugs in the past. It is used by the randomized -# test later as a starting point. When the regression vectors are scanned -# the code reverts to using random data. -# -# The format is: seed km lon lat -set regression_vectors { - {1482225976969 7083 81.634948934258375 30.561509253718668} - {1482340074151 5416 -70.863281847379767 -46.347003465679947} - {1499014685896 6064 -89.818768962202014 -40.463868561416803} - {1412 156 149.29737817929004 15.95807862745508} - {441574 143 59.235461856813856 66.269555127373678} - {160645 187 -101.88575239939883 49.061997951502917} - {750269 154 -90.187939661642517 66.615930412251487} - {342880 145 163.03472387745728 64.012747720821181} - {729955 143 137.86663517256579 63.986745399416776} - {939895 151 59.149620271823181 65.204186651485145} - {1412 156 149.29737817929004 15.95807862745508} - {564862 149 84.062063109158544 -65.685403922426232} -} -set rv_idx 0 - -start_server {tags {"geo"}} { - test {GEOADD create} { - r geoadd nyc -73.9454966 40.747533 "lic market" - } {1} - - test {GEOADD update} { - r geoadd nyc -73.9454966 40.747533 "lic market" - } {0} - - test {GEOADD invalid coordinates} { - catch { - r geoadd nyc -73.9454966 40.747533 "lic market" \ - foo bar "luck market" - } err - set err - } {*valid*} - - test {GEOADD multi add} { - r geoadd nyc -73.9733487 40.7648057 "central park n/q/r" -73.9903085 40.7362513 "union square" -74.0131604 40.7126674 "wtc one" -73.7858139 40.6428986 "jfk" -73.9375699 40.7498929 "q4" -73.9564142 40.7480973 4545 - } {6} - - test {Check geoset values} { - r zrange nyc 0 -1 withscores - } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} - - test {GEORADIUS simple (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km asc - } {{central park n/q/r} 4545 {union square}} - - test {GEORADIUS withdist (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km withdist asc - } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} - - test {GEORADIUS with COUNT} { - r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 - } {{wtc one} {union square} {central park n/q/r}} - - test {GEORADIUS with COUNT but missing integer argument} { - catch {r georadius nyc -73.9798091 40.7598464 10 km COUNT} e - set e - } {ERR*syntax*} - - test {GEORADIUS with COUNT DESC} { - r georadius nyc -73.9798091 40.7598464 10 km COUNT 2 DESC - } {{wtc one} q4} - - test {GEORADIUS HUGE, issue #2767} { - r geoadd users -47.271613776683807 -54.534504198047678 user_000000 - llength [r GEORADIUS users 0 0 50000 km WITHCOORD] - } {1} - - test {GEORADIUSBYMEMBER simple (sorted)} { - r georadiusbymember nyc "wtc one" 7 km - } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} - - test {GEORADIUSBYMEMBER withdist (sorted)} { - r georadiusbymember nyc "wtc one" 7 km withdist - } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} - - test {GEOHASH is able to return 
geohash strings} { - # Example from Wikipedia. - r del points - r geoadd points -5.6 42.6 test - lindex [r geohash points test] 0 - } {ezs42e44yx0} - - test {GEOPOS simple} { - r del points - r geoadd points 10 20 a 30 40 b - lassign [lindex [r geopos points a b] 0] x1 y1 - lassign [lindex [r geopos points a b] 1] x2 y2 - assert {abs($x1 - 10) < 0.001} - assert {abs($y1 - 20) < 0.001} - assert {abs($x2 - 30) < 0.001} - assert {abs($y2 - 40) < 0.001} - } - - test {GEOPOS missing element} { - r del points - r geoadd points 10 20 a 30 40 b - lindex [r geopos points a x b] 1 - } {} - - test {GEODIST simple & unit} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - set m [r geodist points Palermo Catania] - assert {$m > 166274 && $m < 166275} - set km [r geodist points Palermo Catania km] - assert {$km > 166.2 && $km < 166.3} - } - - test {GEODIST missing elements} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - set m [r geodist points Palermo Agrigento] - assert {$m eq {}} - set m [r geodist points Ragusa Agrigento] - assert {$m eq {}} - set m [r geodist empty_key Palermo Catania] - assert {$m eq {}} - } - - test {GEORADIUS STORE option: syntax error} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - catch {r georadius points 13.361389 38.115556 50 km store} e - set e - } {*ERR*syntax*} - - test {GEORANGE STORE option: incompatible options} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - catch {r georadius points 13.361389 38.115556 50 km store points2 withdist} e - assert_match {*ERR*} $e - catch {r georadius points 13.361389 38.115556 50 km store points2 withhash} e - assert_match {*ERR*} $e - catch {r georadius points 13.361389 38.115556 50 km store points2 withcoords} e - assert_match {*ERR*} $e - } - - test {GEORANGE STORE option: plain usage} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km store points2 - assert_equal [r zrange points 0 -1] [r zrange points2 0 -1] - } - - test {GEORANGE STOREDIST option: plain usage} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km storedist points2 - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 1] < 1} - assert {[lindex $res 3] > 166} - assert {[lindex $res 3] < 167} - } - - test {GEORANGE STOREDIST option: COUNT ASC and DESC} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km storedist points2 asc count 1 - assert {[r zcard points2] == 1} - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 0] eq "Palermo"} - - r georadius points 13.361389 38.115556 500 km storedist points2 desc count 1 - assert {[r zcard points2] == 1} - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 0] eq "Catania"} - } - - test {GEOADD + GEORANGE randomized test} { - set attempt 30 - while {[incr attempt -1]} { - set rv [lindex $regression_vectors $rv_idx] - incr rv_idx - - unset -nocomplain debuginfo - set srand_seed [clock milliseconds] - if {$rv ne {}} {set srand_seed [lindex $rv 0]} - lappend debuginfo "srand_seed is $srand_seed" - expr {srand($srand_seed)} ; # If you need a reproducible run - r del mypoints - - if {[randomInt 10] == 
0} { - # From time to time use very big radiuses - set radius_km [expr {[randomInt 50000]+10}] - } else { - # Normally use a few - ~200km radiuses to stress - # test the code the most in edge cases. - set radius_km [expr {[randomInt 200]+10}] - } - if {$rv ne {}} {set radius_km [lindex $rv 1]} - set radius_m [expr {$radius_km*1000}] - geo_random_point search_lon search_lat - if {$rv ne {}} { - set search_lon [lindex $rv 2] - set search_lat [lindex $rv 3] - } - lappend debuginfo "Search area: $search_lon,$search_lat $radius_km km" - set tcl_result {} - set argv {} - for {set j 0} {$j < 20000} {incr j} { - geo_random_point lon lat - lappend argv $lon $lat "place:$j" - set distance [geo_distance $lon $lat $search_lon $search_lat] - if {$distance < $radius_m} { - lappend tcl_result "place:$j" - } - lappend debuginfo "place:$j $lon $lat [expr {$distance/1000}] km" - } - r geoadd mypoints {*}$argv - set res [lsort [r georadius mypoints $search_lon $search_lat $radius_km km]] - set res2 [lsort $tcl_result] - set test_result OK - - if {$res != $res2} { - set rounding_errors 0 - set diff [compare_lists $res $res2] - foreach place $diff { - set mydist [geo_distance $lon $lat $search_lon $search_lat] - set mydist [expr $mydist/1000] - if {($mydist / $radius_km) > 0.999} {incr rounding_errors} - } - # Make sure this is a real error and not a rounidng issue. - if {[llength $diff] == $rounding_errors} { - set res $res2; # Error silenced - } - } - - if {$res != $res2} { - set diff [compare_lists $res $res2] - puts "*** Possible problem in GEO radius query ***" - puts "Redis: $res" - puts "Tcl : $res2" - puts "Diff : $diff" - puts [join $debuginfo "\n"] - foreach place $diff { - if {[lsearch -exact $res2 $place] != -1} { - set where "(only in Tcl)" - } else { - set where "(only in Redis)" - } - lassign [lindex [r geopos mypoints $place] 0] lon lat - set mydist [geo_distance $lon $lat $search_lon $search_lat] - set mydist [expr $mydist/1000] - puts "$place -> [r geopos mypoints $place] $mydist $where" - if {($mydist / $radius_km) > 0.999} {incr rounding_errors} - } - set test_result FAIL - } - unset -nocomplain debuginfo - if {$test_result ne {OK}} break - } - set test_result - } {OK} -} diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl deleted file mode 100755 index c8d56e4ba..000000000 --- a/tests/unit/hyperloglog.tcl +++ /dev/null @@ -1,250 +0,0 @@ -start_server {tags {"hll"}} { -# test {HyperLogLog self test passes} { -# catch {r pfselftest} e -# set e -# } {OK} - - test {PFADD without arguments creates an HLL value} { - r pfadd hll - r exists hll - } {1} - - test {Approximated cardinality after creation is zero} { - r pfcount hll - } {0} - - test {PFADD returns 1 when at least 1 reg was modified} { - r pfadd hll a b c - } {1} - - test {PFADD returns 0 when no reg was modified} { - r pfadd hll a b c - } {0} - - test {PFADD works with empty string (regression)} { - r pfadd hll "" - } - - # Note that the self test stresses much better the - # cardinality estimation error. We are testing just the - # command implementation itself here. - test {PFCOUNT returns approximated cardinality of set} { - r del hll - set res {} - r pfadd hll 1 2 3 4 5 - lappend res [r pfcount hll] - # Call it again to test cached value invalidation. 
- r pfadd hll 6 7 8 8 9 10 - lappend res [r pfcount hll] - set res - } {5 10} - -# test {HyperLogLogs are promote from sparse to dense} { -# r del hll -# r config set hll-sparse-max-bytes 3000 -# set n 0 -# while {$n < 100000} { -# set elements {} -# for {set j 0} {$j < 100} {incr j} {lappend elements [expr rand()]} -# incr n 100 -# r pfadd hll {*}$elements -# set card [r pfcount hll] -# set err [expr {abs($card-$n)}] -# assert {$err < (double($card)/100)*5} -# if {$n < 1000} { -# assert {[r pfdebug encoding hll] eq {sparse}} -# } elseif {$n > 10000} { -# assert {[r pfdebug encoding hll] eq {dense}} -# } -# } -# } - -# test {HyperLogLog sparse encoding stress test} { -# for {set x 0} {$x < 1000} {incr x} { -# r del hll1 hll2 -# set numele [randomInt 100] -# set elements {} -# for {set j 0} {$j < $numele} {incr j} { -# lappend elements [expr rand()] -# } - # Force dense representation of hll2 -# r pfadd hll2 -# r pfdebug todense hll2 -# r pfadd hll1 {*}$elements -# r pfadd hll2 {*}$elements -# assert {[r pfdebug encoding hll1] eq {sparse}} -# assert {[r pfdebug encoding hll2] eq {dense}} - # Cardinality estimated should match exactly. -# assert {[r pfcount hll1] eq [r pfcount hll2]} -# } -# } - -# test {Corrupted sparse HyperLogLogs are detected: Additionl at tail} { -# r del hll -# r pfadd hll a b c -# r append hll "hello" -# set e {} -# catch {r pfcount hll} e -# set e -# } {*INVALIDOBJ*} - -# test {Corrupted sparse HyperLogLogs are detected: Broken magic} { -# r del hll -# r pfadd hll a b c -# r setrange hll 0 "0123" -# set e {} -# catch {r pfcount hll} e -# set e -# } {*WRONGTYPE*} - -# test {Corrupted sparse HyperLogLogs are detected: Invalid encoding} { -# r del hll -# r pfadd hll a b c -# r setrange hll 4 "x" -# set e {} -# catch {r pfcount hll} e -# set e -# } {*WRONGTYPE*} - -# test {Corrupted dense HyperLogLogs are detected: Wrong length} { -# r del hll -# r pfadd hll a b c -# r setrange hll 4 "\x00" -# set e {} -# catch {r pfcount hll} e -# set e -# } {*WRONGTYPE*} - -# test {PFADD, PFCOUNT, PFMERGE type checking works} { -# r set foo bar -# catch {r pfadd foo 1} e -# assert_match {*WRONGTYPE*} $e -# catch {r pfcount foo} e -# assert_match {*WRONGTYPE*} $e -# catch {r pfmerge bar foo} e -# assert_match {*WRONGTYPE*} $e -# catch {r pfmerge foo bar} e -# assert_match {*WRONGTYPE*} $e -# } - - test {PFMERGE results on the cardinality of union of sets} { - r del hll hll1 hll2 hll3 - r pfadd hll1 a b c - r pfadd hll2 b c d - r pfadd hll3 c d e - r pfmerge hll hll1 hll2 hll3 - r pfcount hll - } {5} - -# test {PFCOUNT multiple-keys merge returns cardinality of union} { -# r del hll1 hll2 hll3 -# for {set x 1} {$x < 100000} {incr x} { -# # Force dense representation of hll2 -# r pfadd hll1 "foo-$x" -# r pfadd hll2 "bar-$x" -# r pfadd hll3 "zap-$x" -# -# set card [r pfcount hll1 hll2 hll3] -# set realcard [expr {$x*3}] -# set err [expr {abs($card-$realcard)}] -# assert {$err < (double($card)/100)*5} -# } -# } - -# test {HYPERLOGLOG press test: 5w, 10w, 15w, 20w, 30w, 50w, 100w} { -# r del hll1 -# for {set x 1} {$x <= 1000000} {incr x} { -# r pfadd hll1 "foo-$x" -# if {$x == 50000} { -# set card [r pfcount hll1] -# set realcard [expr {$x*1}] -# set err [expr {abs($card-$realcard)}] -# -# set d_err [expr {$err * 1.0}] -# set d_realcard [expr {$realcard * 1.0}] -# set err_precentage [expr {double($d_err / $d_realcard)}] -# puts "$x error rate: $err_precentage" -# assert {$err < $realcard * 0.01} -# } -# if {$x == 100000} { -# set card [r pfcount hll1] -# set realcard [expr {$x*1}] -# set err 
[expr {abs($card-$realcard)}] -# -# set d_err [expr {$err * 1.0}] -# set d_realcard [expr {$realcard * 1.0}] -# set err_precentage [expr {double($d_err / $d_realcard)}] -# puts "$x error rate: $err_precentage" -# assert {$err < $realcard * 0.01} -# } -# if {$x == 150000} { -# set card [r pfcount hll1] -# set realcard [expr {$x*1}] -# set err [expr {abs($card-$realcard)}] -# -# set d_err [expr {$err * 1.0}] -# set d_realcard [expr {$realcard * 1.0}] -# set err_precentage [expr {double($d_err / $d_realcard)}] -# puts "$x error rate: $err_precentage" -# assert {$err < $realcard * 0.01} -# } -# if {$x == 300000} { -# set card [r pfcount hll1] -# set realcard [expr {$x*1}] -# set err [expr {abs($card-$realcard)}] -# -# set d_err [expr {$err * 1.0}] -# set d_realcard [expr {$realcard * 1.0}] -# set err_precentage [expr {double($d_err / $d_realcard)}] -# puts "$x error rate: $err_precentage" -# assert {$err < $realcard * 0.01} -# } -# if {$x == 500000} { -# set card [r pfcount hll1] -# set realcard [expr {$x*1}] -# set err [expr {abs($card-$realcard)}] -# -# set d_err [expr {$err * 1.0}] -# set d_realcard [expr {$realcard * 1.0}] -# set err_precentage [expr {double($d_err / $d_realcard)}] -# puts "$x error rate: $err_precentage" -# assert {$err < $realcard * 0.01} -# } -# if {$x == 1000000} { -# set card [r pfcount hll1] -# set realcard [expr {$x*1}] -# set err [expr {abs($card-$realcard)}] -# -# set d_err [expr {$err * 1.0}] -# set d_realcard [expr {$realcard * 1.0}] -# set err_precentage [expr {double($d_err / $d_realcard)}] -# puts "$x error rate: $err_precentage" -# assert {$err < $realcard * 0.03} -# } -# } -# } - -# test {PFDEBUG GETREG returns the HyperLogLog raw registers} { -# r del hll -# r pfadd hll 1 2 3 -# llength [r pfdebug getreg hll] -# } {16384} - - -# test {PFDEBUG GETREG returns the HyperLogLog raw registers} { -# r del hll -# r pfadd hll 1 2 3 -# llength [r pfdebug getreg hll] -# } {16384} - -# test {PFADD / PFCOUNT cache invalidation works} { -# r del hll -# r pfadd hll a b c -# r pfcount hll -# assert {[r getrange hll 15 15] eq "\x00"} -# r pfadd hll a b c -# assert {[r getrange hll 15 15] eq "\x00"} -# r pfadd hll 1 2 3 -# assert {[r getrange hll 15 15] eq "\x80"} -# } -} diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl deleted file mode 100644 index 342bb939a..000000000 --- a/tests/unit/introspection.tcl +++ /dev/null @@ -1,59 +0,0 @@ -start_server {tags {"introspection"}} { - test {CLIENT LIST} { - r client list - } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*} - - test {MONITOR can log executed commands} { - set rd [redis_deferring_client] - $rd monitor - r set foo bar - r get foo - list [$rd read] [$rd read] [$rd read] - } {*OK*"set" "foo"*"get" "foo"*} - - test {MONITOR can log commands issued by the scripting engine} { - set rd [redis_deferring_client] - $rd monitor - r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar - $rd read ;# Discard the OK - assert_match {*eval*} [$rd read] - assert_match {*lua*"set"*"foo"*"bar"*} [$rd read] - } - - test {CLIENT GETNAME should return NIL if name is not assigned} { - r client getname - } {} - - test {CLIENT LIST shows empty fields for unassigned names} { - r client list - } {*name= *} - - test {CLIENT SETNAME does not accept spaces} { - catch {r client setname "foo bar"} e - set e - } {ERR*} - - test {CLIENT SETNAME can assign a name to this connection} { - assert_equal [r client setname myname] {OK} - r client list - } 
{*name=myname*} - - test {CLIENT SETNAME can change the name of an existing connection} { - assert_equal [r client setname someothername] {OK} - r client list - } {*name=someothername*} - - test {After CLIENT SETNAME, connection can still be closed} { - set rd [redis_deferring_client] - $rd client setname foobar - assert_equal [$rd read] "OK" - assert_match {*foobar*} [r client list] - $rd close - # Now the client should no longer be listed - wait_for_condition 50 100 { - [string match {*foobar*} [r client list]] == 0 - } else { - fail "Client still listed in CLIENT LIST after SETNAME." - } - } -} diff --git a/tests/unit/keys.tcl b/tests/unit/keys.tcl deleted file mode 100644 index cb62444f3..000000000 --- a/tests/unit/keys.tcl +++ /dev/null @@ -1,54 +0,0 @@ -start_server {tags {"keys"}} { - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - assert_equal {foo_a foo_b foo_c} [r keys foo*] - assert_equal {foo_a foo_b foo_c} [r keys f*] - assert_equal {foo_a foo_b foo_c} [r keys f*o*] - } - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {KEYS select by type} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r del $key - } - r set kv_1 value - r set kv_2 value - r hset hash_1 hash_field 1 - r hset hash_2 hash_field 1 - r lpush list_1 value - r lpush list_2 value - r zadd zset_1 1 "a" - r zadd zset_2 1 "a" - r sadd set_1 "a" - r sadd set_2 "a" - assert_equal {kv_1 kv_2} [r keys * string] - assert_equal {hash_1 hash_2} [r keys * hash] - assert_equal {list_1 list_2} [r keys * list] - assert_equal {zset_1 zset_2} [r keys * zset] - assert_equal {set_1 set_2} [r keys * set] - assert_equal {kv_1 kv_2 hash_1 hash_2 zset_1 zset_2 set_1 set_2 list_1 list_2} [r keys *] - assert_equal {kv_1 kv_2} [r keys * STRING] - assert_equal {hash_1 hash_2} [r keys * HASH] - assert_equal {list_1 list_2} [r keys * LIST] - assert_equal {zset_1 zset_2} [r keys * ZSET] - assert_equal {set_1 set_2} [r keys * SET] - } - - test {KEYS syntax error} { - catch {r keys * a} e1 - catch {r keys * strings} e2 - catch {r keys * c d} e3 - catch {r keys} e4 - catch {r keys * set zset} e5 - assert_equal {ERR syntax error} [set e1] - assert_equal {ERR syntax error} [set e2] - assert_equal {ERR syntax error} [set e3] - assert_equal {ERR wrong number of arguments for 'keys' command} [set e4] - assert_equal {ERR syntax error} [set e5] - } -} diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl deleted file mode 100644 index b736cad98..000000000 --- a/tests/unit/latency-monitor.tcl +++ /dev/null @@ -1,50 +0,0 @@ -start_server {tags {"latency-monitor"}} { - # Set a threshold high enough to avoid spurious latency events. 
- r config set latency-monitor-threshold 200 - r latency reset - - test {Test latency events logging} { - r debug sleep 0.3 - after 1100 - r debug sleep 0.4 - after 1100 - r debug sleep 0.5 - assert {[r latency history command] >= 3} - } - - test {LATENCY HISTORY output is ok} { - set min 250 - set max 450 - foreach event [r latency history command] { - lassign $event time latency - assert {$latency >= $min && $latency <= $max} - incr min 100 - incr max 100 - set last_time $time ; # Used in the next test - } - } - - test {LATENCY LATEST output is ok} { - foreach event [r latency latest] { - lassign $event eventname time latency max - assert {$eventname eq "command"} - assert {$max >= 450 & $max <= 650} - assert {$time == $last_time} - break - } - } - - test {LATENCY HISTORY / RESET with wrong event name is fine} { - assert {[llength [r latency history blabla]] == 0} - assert {[r latency reset blabla] == 0} - } - - test {LATENCY DOCTOR produces some output} { - assert {[string length [r latency doctor]] > 0} - } - - test {LATENCY RESET is able to reset events} { - assert {[r latency reset] > 0} - assert {[r latency latest] eq {}} - } -} diff --git a/tests/unit/limits.tcl b/tests/unit/limits.tcl deleted file mode 100644 index b37ea9b0f..000000000 --- a/tests/unit/limits.tcl +++ /dev/null @@ -1,16 +0,0 @@ -start_server {tags {"limits"} overrides {maxclients 10}} { - test {Check if maxclients works refusing connections} { - set c 0 - catch { - while {$c < 50} { - incr c - set rd [redis_deferring_client] - $rd ping - $rd read - after 100 - } - } e - assert {$c > 8 && $c <= 10} - set e - } {*ERR max*reached*} -} diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl deleted file mode 100644 index 2f853f29d..000000000 --- a/tests/unit/maxmemory.tcl +++ /dev/null @@ -1,152 +0,0 @@ -start_server {tags {"maxmemory"}} { - test "Config get maxmemory." { - set maxm [r config get maxmemory] - assert {$maxm > 1} - } - - # The current maxmemory command does not support config set and policy. - # For a complete list of commands, refer to the wiki: https://github.com/OpenAtomFoundation/pika/wiki/pika-%E5%B7%AE%E5%BC%82%E5%8C%96%E5%91%BD%E4%BB%A4 - - # test "Without maxmemory small integers are shared" { - # r config set maxmemory 0 - # r set a 1 - # assert {[r object refcount a] > 1} - # } - - # test "With maxmemory and non-LRU policy integers are still shared" { - # r config set maxmemory 1073741824 - # r config set maxmemory-policy allkeys-random - # r set a 1 - # assert {[r object refcount a] > 1} - # } - - # test "With maxmemory and LRU policy integers are not shared" { - # r config set maxmemory 1073741824 - # r config set maxmemory-policy allkeys-lru - # r set a 1 - # r config set maxmemory-policy volatile-lru - # r set b 1 - # assert {[r object refcount a] == 1} - # assert {[r object refcount b] == 1} - # r config set maxmemory 0 - # } - - # foreach policy { - # allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl - # } { - # test "maxmemory - is the memory limit honoured? (policy $policy)" { - # # make sure to start with a blank instance - # r flushall - # # Get the current memory limit and calculate a new limit. - # # We just add 100k to the current memory size so that it is - # # fast for us to reach that limit. - # set used [s used_memory] - # set limit [expr {$used+100*1024}] - # r config set maxmemory $limit - # r config set maxmemory-policy $policy - # # Now add keys until the limit is almost reached. 
- # set numkeys 0 - # while 1 { - # r setex [randomKey] 10000 x - # incr numkeys - # if {[s used_memory]+4096 > $limit} { - # assert {$numkeys > 10} - # break - # } - # } - # # If we add the same number of keys already added again, we - # # should still be under the limit. - # for {set j 0} {$j < $numkeys} {incr j} { - # r setex [randomKey] 10000 x - # } - # assert {[s used_memory] < ($limit+4096)} - # } - # } - - # foreach policy { - # allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl - # } { - # test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" { - # # make sure to start with a blank instance - # r flushall - # # Get the current memory limit and calculate a new limit. - # # We just add 100k to the current memory size so that it is - # # fast for us to reach that limit. - # set used [s used_memory] - # set limit [expr {$used+100*1024}] - # r config set maxmemory $limit - # r config set maxmemory-policy $policy - # # Now add keys until the limit is almost reached. - # set numkeys 0 - # while 1 { - # r set [randomKey] x - # incr numkeys - # if {[s used_memory]+4096 > $limit} { - # assert {$numkeys > 10} - # break - # } - # } - # # If we add the same number of keys already added again and - # # the policy is allkeys-* we should still be under the limit. - # # Otherwise we should see an error reported by Redis. - # set err 0 - # for {set j 0} {$j < $numkeys} {incr j} { - # if {[catch {r set [randomKey] x} e]} { - # if {[string match {*used memory*} $e]} { - # set err 1 - # } - # } - # } - # if {[string match allkeys-* $policy]} { - # assert {[s used_memory] < ($limit+4096)} - # } else { - # assert {$err == 1} - # } - # } - # } - - # foreach policy { - # volatile-lru volatile-random volatile-ttl - # } { - # test "maxmemory - policy $policy should only remove volatile keys." { - # # make sure to start with a blank instance - # r flushall - # # Get the current memory limit and calculate a new limit. - # # We just add 100k to the current memory size so that it is - # # fast for us to reach that limit. - # set used [s used_memory] - # set limit [expr {$used+100*1024}] - # r config set maxmemory $limit - # r config set maxmemory-policy $policy - # # Now add keys until the limit is almost reached. - # set numkeys 0 - # while 1 { - # # Odd keys are volatile - # # Even keys are non volatile - # if {$numkeys % 2} { - # r setex "key:$numkeys" 10000 x - # } else { - # r set "key:$numkeys" x - # } - # if {[s used_memory]+4096 > $limit} { - # assert {$numkeys > 10} - # break - # } - # incr numkeys - # } - # # Now we add the same number of volatile keys already added. - # # We expect Redis to evict only volatile keys in order to make - # # space. - # set err 0 - # for {set j 0} {$j < $numkeys} {incr j} { - # catch {r setex "foo:$j" 10000 x} - # } - # # We should still be under the limit. - # assert {[s used_memory] < ($limit+4096)} - # # However all our non volatile keys should be here. 
- # for {set j 0} {$j < $numkeys} {incr j 2} { - # assert {[r exists "key:$j"]} - # } - # } - # } -} diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl deleted file mode 100644 index 7ca9a705b..000000000 --- a/tests/unit/memefficiency.tcl +++ /dev/null @@ -1,37 +0,0 @@ -proc test_memory_efficiency {range} { - r flushall - set rd [redis_deferring_client] - set base_mem [s used_memory] - set written 0 - for {set j 0} {$j < 10000} {incr j} { - set key key:$j - set val [string repeat A [expr {int(rand()*$range)}]] - $rd set $key $val - incr written [string length $key] - incr written [string length $val] - incr written 2 ;# A separator is the minimum to store key-value data. - } - for {set j 0} {$j < 10000} {incr j} { - $rd read ; # Discard replies - } - - set current_mem [s used_memory] - set used [expr {$current_mem-$base_mem}] - set efficiency [expr {double($written)/$used}] - return $efficiency -} - -start_server {tags {"memefficiency"}} { - foreach {size_range expected_min_efficiency} { - 32 0.15 - 64 0.25 - 128 0.35 - 1024 0.75 - 16384 0.82 - } { - test "Memory efficiency with values in range $size_range" { - set efficiency [test_memory_efficiency $size_range] - assert {$efficiency >= $expected_min_efficiency} - } - } -} diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl deleted file mode 100644 index 399221473..000000000 --- a/tests/unit/multi.tcl +++ /dev/null @@ -1,309 +0,0 @@ -start_server {tags {"multi"}} { - test {MUTLI / EXEC basics} { - r del mylist - r rpush mylist a - r rpush mylist b - r rpush mylist c - r multi - set v1 [r lrange mylist 0 -1] - set v2 [r ping] - set v3 [r exec] - list $v1 $v2 $v3 - } {QUEUED QUEUED {{a b c} PONG}} - - test {DISCARD} { - r del mylist - r rpush mylist a - r rpush mylist b - r rpush mylist c - r multi - set v1 [r del mylist] - set v2 [r discard] - set v3 [r lrange mylist 0 -1] - list $v1 $v2 $v3 - } {QUEUED OK {a b c}} - - test {Nested MULTI are not allowed} { - set err {} - r multi - catch {[r multi]} err - r exec - set _ $err - } {*ERR MULTI*} - - test {MULTI where commands alter argc/argv} { - r sadd myset a - r multi - r spop myset - list [r exec] [r exists myset] - } {a 0} - - test {WATCH inside MULTI is not allowed} { - set err {} - r multi - catch {[r watch x]} err - r exec - set _ $err - } {*ERR WATCH*} - - test {EXEC fails if there are errors while queueing commands #1} { - r del foo1 foo2 - r multi - r set foo1 bar1 - catch {r non-existing-command} - r set foo2 bar2 - catch {r exec} e - assert_match {EXECABORT*} $e - list [r exists foo1] [r exists foo2] - } {0 0} - -# test {EXEC fails if there are errors while queueing commands #2} { -# set rd [redis_deferring_client] -# r del foo1 foo2 -# r multi -# r set foo1 bar1 -# $rd config set maxmemory 1 -# assert {[$rd read] eq {OK}} -# catch {r lpush mylist myvalue} -# $rd config set maxmemory 0 -# assert {[$rd read] eq {OK}} -# r set foo2 bar2 -# catch {r exec} e -# assert_match {EXECABORT*} $e -# $rd close -# list [r exists foo1] [r exists foo2] -# } {0 0} - -# test {If EXEC aborts, the client MULTI state is cleared} { -# r del foo1 foo2 -# r multi -# r set foo1 bar1 -# catch {r non-existing-command} -# r set foo2 bar2 -# catch {r exec} e -# assert_match {EXECABORT*} $e -# r ping -# } {PONG} - -# test {EXEC works on WATCHed key not modified} { -# r watch x y z -# r watch k -# r multi -# r ping -# r exec -# } {PONG} - -# test {EXEC fail on WATCHed key modified (1 key of 1 watched)} { -# r set x 30 -# r watch x -# r set x 40 -# r multi -# r ping -# r exec -# } {} - 
-# test {EXEC fail on WATCHed key modified (1 key of 5 watched)} { -# r set x 30 -# r watch a b x k z -# r set x 40 -# r multi -# r ping -# r exec -# } {} - -# test {EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty} { -# r flushdb -# r lpush foo barsync" -# r watch foo -# r sort emptylist store foo -# r multi -# r ping -# r exec -# } {} - -# test {After successful EXEC key is no longer watched} { -# r set x 30 -# r watch x -# r multi -# r ping -# r exec -# r set x 40 -# r multi -# r ping -# r exec -# } {PONG} - -# test {After failed EXEC key is no longer watched} { -# r set x 30 -# r watch x -# r set x 40 -# r multi -# r ping -# r exec -# r set x 40 -# r multi -# r ping -# r exec -# } {PONG} - -# test {It is possible to UNWATCH} { -# r set x 30 -# r watch x -# r set x 40 -# r unwatch -# r multi -# r ping -# r exec -# } {PONG} - - test {UNWATCH when there is nothing watched works as expected} { - r unwatch - } {OK} - -# test {FLUSHALL is able to touch the watched keys} { -# r set x 30 -# r watch x -# r flushall -# r multi -# r ping -# r exec -# } {} - -# test {FLUSHALL does not touch non affected keys} { -# r del x -# r watch x -# r flushall -# r multi -# r ping -# r exec -# } {PONG} - -# test {FLUSHDB is able to touch the watched keys} { -# r set x 30 -# r watch x -# r flushdb -# r multi -# r ping -# r exec -# } {} - -# test {FLUSHDB does not touch non affected keys} { -# r del x -# r watch x -# r flushdb -# r multi -# r ping -# r exec -# } {PONG} - -# test {WATCH is able to remember the DB a key belongs to} { -# r select 5 -# r set x 30 -# r watch x -# r select 1 -# r set x 10 -# r select 5 -# r multi -# r ping -# set res [r exec] -# # Restore original DB -# r select 9 -# set res -# } {PONG} - -# test {WATCH will consider touched keys target of EXPIRE} { -# r del x -# r set x foo -# r watch x -# r expire x 10 -# r multi -# r ping -# r exec -# } {} - -# test {WATCH will not consider touched expired keys} { -# r del x -# r set x foo -# r expire x 1 -# r watch x -# after 1100 -# r multi -# r ping -# r exec -# } {PONG} - - test {DISCARD should clear the WATCH dirty flag on the client} { - r watch x - r set x 10 - r multi - r discard - r multi - r incr x - r exec - } {11} - - test {DISCARD should UNWATCH all the keys} { - r watch x - r set x 10 - r multi - r discard - r set x 10 - r multi - r incr x - r exec - } {11} - -# test {MULTI / EXEC is propagated correctly (single write command)} { -# set repl [attach_to_replication_stream] -# r multi -# r set foo bar -# r exec -# assert_replication_stream $repl { -# {select *} -# {multi} -# {set foo bar} -# {exec} -# } -# close_replication_stream $repl -# } -# -# test {MULTI / EXEC is propagated correctly (empty transaction)} { -# set repl [attach_to_replication_stream] -# r multi -# r exec -# r set foo bar -# assert_replication_stream $repl { -# {select *} -# {set foo bar} -# } -# close_replication_stream $repl -# } -# -# test {MULTI / EXEC is propagated correctly (read-only commands)} { -# r set foo value1 -# set repl [attach_to_replication_stream] -# r multi -# r get foo -# r exec -# r set foo value2 -# assert_replication_stream $repl { -# {select *} -# {set foo value2} -# } -# close_replication_stream $repl -# } -# -# test {MULTI / EXEC is propagated correctly (write command, no effect)} { -# r del bar foo bar -# set repl [attach_to_replication_stream] -# r multi -# r del foo -# r exec -# assert_replication_stream $repl { -# {select *} -# {multi} -# {exec} -# } -# close_replication_stream $repl -# } -} diff --git 
a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl deleted file mode 100644 index 5d625cf45..000000000 --- a/tests/unit/obuf-limits.tcl +++ /dev/null @@ -1,73 +0,0 @@ -start_server {tags {"obuf-limits"}} { - test {Client output buffer hard limit is enforced} { - r config set client-output-buffer-limit {pubsub 100000 0 0} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 200000} break - } - assert {$omem >= 90000 && $omem < 200000} - $rd1 close - } - - test {Client output buffer soft limit is not enforced if time is not overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 10} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 5} break - } - } - assert {$omem >= 100000 && $time_elapsed >= 5 && $time_elapsed <= 10} - $rd1 close - } - - test {Client output buffer soft limit is enforced if time is overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 3} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 10} break - } - } - assert {$omem >= 100000 && $time_elapsed < 6} - $rd1 close - } -} diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl deleted file mode 100644 index 90faf00a6..000000000 --- a/tests/unit/other.tcl +++ /dev/null @@ -1,245 +0,0 @@ -start_server {tags {"other"}} { - if {$::force_failure} { - # This is used just for test suite development purposes. 
- test {Failing test} { - format err - } {ok} - } - -# test {SAVE - make sure there are all the types as values} { -# # Wait for a background saving in progress to terminate -# waitForBgsave r -# r lpush mysavelist hello -# r lpush mysavelist world -# r set myemptykey {} -# r set mynormalkey {blablablba} -# r zadd mytestzset 10 a -# r zadd mytestzset 20 b -# r zadd mytestzset 30 c -# r save -# } {OK} - - tags {slow} { - if {$::accurate} {set iterations 10000} else {set iterations 1000} - foreach fuzztype {binary alpha compr} { - test "FUZZ stresser with data model $fuzztype" { - set err 0 - for {set i 0} {$i < $iterations} {incr i} { - set fuzz [randstring 0 512 $fuzztype] - r set foo $fuzz - set got [r get foo] - if {$got ne $fuzz} { - set err [list $fuzz $got] - break - } - } - set _ $err - } {0} - } - } - -# test {BGSAVE} { -# waitForBgsave r -# r flushdb -# r save -# r set x 10 -# r bgsave -# waitForBgsave r -# r debug reload -# r get x -# } {10} - - test {SELECT an out of range DB} { - catch {r select 1000000} err - set _ $err - } {*invalid*} - -# tags {consistency} { -# if {![catch {package require sha1}]} { -# if {$::accurate} {set numops 10000} else {set numops 1000} -# test {Check consistency of different data types after a reload} { -# r flushdb -# createComplexDataset r $numops -# set dump [csvdump r] -# set sha1 [r debug digest] -# r debug reload -# set sha1_after [r debug digest] -# if {$sha1 eq $sha1_after} { -# set _ 1 -# } else { -# set newdump [csvdump r] -# puts "Consistency test failed!" -# puts "You can inspect the two dumps in /tmp/repldump*.txt" -# -# set fd [open /tmp/repldump1.txt w] -# puts $fd $dump -# close $fd -# set fd [open /tmp/repldump2.txt w] -# puts $fd $newdump -# close $fd -# -# set _ 0 -# } -# } {1} - -# test {Same dataset digest if saving/reloading as AOF?} { -# r bgrewriteaof -# waitForBgrewriteaof r -# r debug loadaof -# set sha1_after [r debug digest] -# if {$sha1 eq $sha1_after} { -# set _ 1 -# } else { -# set newdump [csvdump r] -# puts "Consistency test failed!" -# puts "You can inspect the two dumps in /tmp/aofdump*.txt" -# -# set fd [open /tmp/aofdump1.txt w] -# puts $fd $dump -# close $fd -# set fd [open /tmp/aofdump2.txt w] -# puts $fd $newdump -# close $fd -# -# set _ 0 -# } -# } {1} -# } -# } - -# test {EXPIRES after a reload (snapshot + append only file rewrite)} { -# r flushdb -# r set x 10 -# r expire x 1000 -# r save -# r debug reload -# set ttl [r ttl x] -# set e1 [expr {$ttl > 900 && $ttl <= 1000}] -# r bgrewriteaof -# waitForBgrewriteaof r -# r debug loadaof -# set ttl [r ttl x] -# set e2 [expr {$ttl > 900 && $ttl <= 1000}] -# list $e1 $e2 -# } {1 1} - -# test {EXPIRES after AOF reload (without rewrite)} { -# r flushdb -# r config set appendonly yes -# r set x somevalue -# r expire x 1000 -# r setex y 2000 somevalue -# r set z somevalue -# r expireat z [expr {[clock seconds]+3000}] -# -# # Milliseconds variants -# r set px somevalue -# r pexpire px 1000000 -# r psetex py 2000000 somevalue -# r set pz somevalue -# r pexpireat pz [expr {([clock seconds]+3000)*1000}] -# -# # Reload and check -# waitForBgrewriteaof r -# # We need to wait two seconds to avoid false positives here, otherwise -# # the DEBUG LOADAOF command may read a partial file. -# # Another solution would be to set the fsync policy to no, since this -# # prevents write() to be delayed by the completion of fsync(). 
-# after 2000 -# r debug loadaof -# set ttl [r ttl x] -# assert {$ttl > 900 && $ttl <= 1000} -# set ttl [r ttl y] -# assert {$ttl > 1900 && $ttl <= 2000} -# set ttl [r ttl z] -# assert {$ttl > 2900 && $ttl <= 3000} -# set ttl [r ttl px] -# assert {$ttl > 900 && $ttl <= 1000} -# set ttl [r ttl py] -# assert {$ttl > 1900 && $ttl <= 2000} -# set ttl [r ttl pz] -# assert {$ttl > 2900 && $ttl <= 3000} -# r config set appendonly no -# } - - tags {protocol} { - test {PIPELINING stresser (also a regression for the old epoll bug)} { - set fd2 [socket $::host $::port] - fconfigure $fd2 -encoding binary -translation binary - puts -nonewline $fd2 "SELECT 9\r\n" - flush $fd2 - gets $fd2 - - for {set i 0} {$i < 100000} {incr i} { - set q {} - set val "0000${i}0000" - append q "SET key:$i $val\r\n" - puts -nonewline $fd2 $q - set q {} - append q "GET key:$i\r\n" - puts -nonewline $fd2 $q - } - flush $fd2 - - for {set i 0} {$i < 100000} {incr i} { - gets $fd2 line - gets $fd2 count - set count [string range $count 1 end] - set val [read $fd2 $count] - read $fd2 2 - } - close $fd2 - set _ 1 - } {1} - } - -# test {APPEND basics} { -# list [r append foo bar] [r get foo] \ -# [r append foo 100] [r get foo] -# } {3 bar 6 bar100} - - test {APPEND basics, integer encoded values} { - set res {} - r del foo - r append foo 1 - r append foo 2 - lappend res [r get foo] - r set foo 1 - r append foo 2 - lappend res [r get foo] - } {12 12} - - test {APPEND fuzzing} { - set err {} - foreach type {binary alpha compr} { - set buf {} - r del x - for {set i 0} {$i < 1000} {incr i} { - set bin [randstring 0 10 $type] - append buf $bin - r append x $bin - } - if {$buf != [r get x]} { - set err "Expected '$buf' found '[r get x]'" - break - } - } - set _ $err - } {} - - # Leave the user with a clean DB before to exit -# test {FLUSHDB} { -# set aux {} -# r select 9 -# r flushdb -# lappend aux [r dbsize] -# r select 10 -# r flushdb -# lappend aux [r dbsize] -# } {0 0} - -# test {Perform a final SAVE to leave a clean DB on disk} { -# waitForBgsave r -# r save -# } {OK} -} diff --git a/tests/unit/printver.tcl b/tests/unit/printver.tcl deleted file mode 100644 index b164ac74c..000000000 --- a/tests/unit/printver.tcl +++ /dev/null @@ -1,6 +0,0 @@ -start_server {} { - set i [r info] - regexp {pika_version:(.*?)\r\n} $i - version - regexp {pika_git_sha:(.*?)\r\n} $i - sha1 - puts "Testing Pika version $version ($sha1)" -} diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl deleted file mode 100644 index 492d4bede..000000000 --- a/tests/unit/protocol.tcl +++ /dev/null @@ -1,117 +0,0 @@ -start_server {tags {"protocol"}} { - test "Handle an empty query" { - reconnect - r write "\r\n" - r flush - assert_equal "PONG" [r ping] - } - -# test "Negative multibulk length" { -# reconnect -# r write "*-10\r\n" -# r flush -# assert_equal PONG [r ping] -# } - -# test "Out of range multibulk length" { -# reconnect -# r write "*20000000\r\n" -# r flush -# assert_error "*invalid multibulk length*" {r read} -# } -# -# test "Wrong multibulk payload header" { -# reconnect -# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\nfooz\r\n" -# r flush -# assert_error "*expected '$', got 'f'*" {r read} -# } -# -# test "Negative multibulk payload length" { -# reconnect -# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$-10\r\n" -# r flush -# assert_error "*invalid bulk length*" {r read} -# } -# -# test "Out of range multibulk payload length" { -# reconnect -# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$2000000000\r\n" -# r flush -# assert_error "*invalid bulk length*" 
{r read} -# } -# -# test "Non-number multibulk payload length" { -# reconnect -# r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$blabla\r\n" -# r flush -# assert_error "*invalid bulk length*" {r read} -# } -# -# test "Multi bulk request not followed by bulk arguments" { -# reconnect -# r write "*1\r\nfoo\r\n" -# r flush -# assert_error "*expected '$', got 'f'*" {r read} -# } -# -# test "Generic wrong number of args" { -# reconnect -# assert_error "*wrong*arguments*ping*" {r ping x y z} -# } -# -# test "Unbalanced number of quotes" { -# reconnect -# r write "set \"\"\"test-key\"\"\" test-value\r\n" -# r write "ping\r\n" -# r flush -# assert_error "*unbalanced*" {r read} -# } - -# set c 0 -# foreach seq [list "\x00" "*\x00" "$\x00"] { -# incr c -# test "Protocol desync regression test #$c" { -# set s [socket [srv 0 host] [srv 0 port]] -# puts -nonewline $s $seq -# set payload [string repeat A 1024]"\n" -# set test_start [clock seconds] -# set test_time_limit 30 -# while 1 { -# if {[catch { -# puts -nonewline $s payload -# flush $s -# incr payload_size [string length $payload] -# }]} { -# set retval [gets $s] -# close $s -# break -# } else { -# set elapsed [expr {[clock seconds]-$test_start}] -# if {$elapsed > $test_time_limit} { -# close $s -# error "assertion:Redis did not closed connection after protocol desync" -# } -# } -# } -# set retval -# } {*Protocol error*} -# } -# unset c -} - -start_server {tags {"regression"}} { - test "Regression for a crash with blocking ops and pipelining" { - set rd [redis_deferring_client] - set fd [r channel] - set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n" - puts -nonewline $fd $proto$proto - flush $fd - set res {} - - $rd rpush nolist a - $rd read - $rd rpush nolist a - $rd read - } -} diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl deleted file mode 100644 index 60930ae27..000000000 --- a/tests/unit/pubsub.tcl +++ /dev/null @@ -1,399 +0,0 @@ -start_server {tags {"pubsub"}} { - proc __consume_subscribe_messages {client type channels} { - set numsub -1 - set counts {} - - for {set i [llength $channels]} {$i > 0} {incr i -1} { - set msg [$client read] - assert_equal $type [lindex $msg 0] - - # when receiving subscribe messages the channels names - # are ordered. when receiving unsubscribe messages - # they are unordered - set idx [lsearch -exact $channels [lindex $msg 1]] - if {[string match "*unsubscribe" $type]} { - assert {$idx >= 0} - } else { - assert {$idx == 0} - } - set channels [lreplace $channels $idx $idx] - - # aggregate the subscription count to return to the caller - lappend counts [lindex $msg 2] - } - - # we should have received messages for channels - assert {[llength $channels] == 0} - return $counts - } - - proc subscribe {client channels} { - $client subscribe {*}$channels - __consume_subscribe_messages $client subscribe $channels - } - - proc unsubscribe {client {channels {}}} { - $client unsubscribe {*}$channels - __consume_subscribe_messages $client unsubscribe $channels - } - - proc psubscribe {client channels} { - $client psubscribe {*}$channels - __consume_subscribe_messages $client psubscribe $channels - } - - proc punsubscribe {client {channels {}}} { - $client punsubscribe {*}$channels - __consume_subscribe_messages $client punsubscribe $channels - } - -# test "Pub/Sub PING" { -# set rd1 [redis_deferring_client] -# subscribe $rd1 somechannel -# # While subscribed to non-zero channels PING works in Pub/Sub mode. 
-# $rd1 ping -# set reply1 [$rd1 read] -# unsubscribe $rd1 somechannel -# # Now we are unsubscribed, PING should just return PONG. -# $rd1 ping -# set reply2 [$rd1 read] -# $rd1 close -# list $reply1 $reply2 -# } {PONG PONG} - - test "PUBLISH/SUBSCRIBE basics" { - set rd1 [redis_deferring_client] - - # subscribe to two channels - assert_equal {1 2} [subscribe $rd1 {chan1 chan2}] - assert_equal 1 [r publish chan1 hello] - assert_equal 1 [r publish chan2 world] - assert_equal {message chan1 hello} [$rd1 read] - assert_equal {message chan2 world} [$rd1 read] - - # unsubscribe from one of the channels - unsubscribe $rd1 {chan1} - assert_equal 0 [r publish chan1 hello] - assert_equal 1 [r publish chan2 world] - assert_equal {message chan2 world} [$rd1 read] - - # unsubscribe from the remaining channel - unsubscribe $rd1 {chan2} - assert_equal 0 [r publish chan1 hello] - assert_equal 0 [r publish chan2 world] - - # clean up clients - $rd1 close - } - - test "PUBLISH/SUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - assert_equal {1} [subscribe $rd1 {chan1}] - assert_equal {1} [subscribe $rd2 {chan1}] - assert_equal 2 [r publish chan1 hello] - assert_equal {message chan1 hello} [$rd1 read] - assert_equal {message chan1 hello} [$rd2 read] - - # clean up clients - $rd1 close - $rd2 close - } - - test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] - assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}] - unsubscribe $rd1 - assert_equal 0 [r publish chan1 hello] - assert_equal 0 [r publish chan2 hello] - assert_equal 0 [r publish chan3 hello] - - # clean up clients - $rd1 close - } - - test "SUBSCRIBE to one channel more than once" { - set rd1 [redis_deferring_client] - assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}] - assert_equal 1 [r publish chan1 hello] - assert_equal {message chan1 hello} [$rd1 read] - - # clean up clients - $rd1 close - } - - test "UNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] - assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}] - - # clean up clients - $rd1 close - } - - test "PUBLISH/PSUBSCRIBE basics" { - set rd1 [redis_deferring_client] - - # subscribe to two patterns - assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}] - assert_equal 1 [r publish foo.1 hello] - assert_equal 1 [r publish bar.1 hello] - assert_equal 0 [r publish foo1 hello] - assert_equal 0 [r publish barfoo.1 hello] - assert_equal 0 [r publish qux.1 hello] - assert_equal {pmessage foo.* foo.1 hello} [$rd1 read] - assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] - - # unsubscribe from one of the patterns - assert_equal {1} [punsubscribe $rd1 {foo.*}] - assert_equal 0 [r publish foo.1 hello] - assert_equal 1 [r publish bar.1 hello] - assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] - - # unsubscribe from the remaining pattern - assert_equal {0} [punsubscribe $rd1 {bar.*}] - assert_equal 0 [r publish foo.1 hello] - assert_equal 0 [r publish bar.1 hello] - - # clean up clients - $rd1 close - } - - test "PUBLISH/PSUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - assert_equal {1} [psubscribe $rd1 {chan.*}] - assert_equal {1} [psubscribe $rd2 {chan.*}] - assert_equal 2 [r publish chan.foo hello] - assert_equal {pmessage chan.* chan.foo hello} [$rd1 read] - assert_equal {pmessage chan.* chan.foo hello} [$rd2 read] - - # clean up clients - $rd1 close - $rd2 close - } - - test "PUBLISH/PSUBSCRIBE after 
PUNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] - assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}] - punsubscribe $rd1 - assert_equal 0 [r publish chan1.hi hello] - assert_equal 0 [r publish chan2.hi hello] - assert_equal 0 [r publish chan3.hi hello] - - # clean up clients - $rd1 close - } - - test "PUNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] - assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] - - # clean up clients - $rd1 close - } - - test "NUMSUB returns numbers, not strings (#1561)" { - r pubsub numsub abc def - } {abc 0 def 0} - - test "PubSub return value" { - set rd1 [redis_deferring_client] - assert_equal {1} [subscribe $rd1 {foo.bar}] - assert_equal {2} [psubscribe $rd1 {foo.*}] - assert_equal {foo.bar} [r pubsub channels] - assert_equal {1} [r pubsub numpat] - assert_equal {foo.bar 1} [r pubsub numsub foo.bar] - - $rd1 close - } - - test "Mix SUBSCRIBE and PSUBSCRIBE" { - set rd1 [redis_deferring_client] - assert_equal {1} [subscribe $rd1 {foo.bar}] - assert_equal {2} [psubscribe $rd1 {foo.*}] - - assert_equal 2 [r publish foo.bar hello] - assert_equal {message foo.bar hello} [$rd1 read] - assert_equal {pmessage foo.* foo.bar hello} [$rd1 read] - - # clean up clients - $rd1 close - } - - test "PUNSUBSCRIBE and UNSUBSCRIBE should always reply" { - # Make sure we are not subscribed to any channel at all. - r punsubscribe - r unsubscribe - # Now check if the commands still reply correctly. - set reply1 [r punsubscribe] - set reply2 [r unsubscribe] - concat $reply1 $reply2 - } {punsubscribe {} 0 unsubscribe {} 0} - - ### Keyspace events notification tests - -# test "Keyspace notifications: we receive keyspace notifications" { -# r config set notify-keyspace-events KA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we receive keyevent notifications" { -# r config set notify-keyspace-events EA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we can receive both kind of events" { -# r config set notify-keyspace-events KEA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we are able to mask events" { -# r config set notify-keyspace-events KEl -# r del mylist -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r lpush mylist a -# # No notification for set, because only list commands are enabled. 
-# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: general events test" { -# r config set notify-keyspace-events KEg -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r expire foo 1 -# r del foo -# assert_equal {pmessage * __keyspace@9__:foo expire} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:expire foo} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:foo del} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:del foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: list events test" { -# r config set notify-keyspace-events KEl -# r del mylist -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r lpush mylist a -# r rpush mylist a -# r rpop mylist -# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:mylist rpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:rpush mylist} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:mylist rpop} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:rpop mylist} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: set events test" { -# r config set notify-keyspace-events Ks -# r del myset -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r sadd myset a b c d -# r srem myset x -# r sadd myset x y z -# r srem myset x -# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myset srem} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: zset events test" { -# r config set notify-keyspace-events Kz -# r del myzset -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r zadd myzset 1 a 2 b -# r zrem myzset x -# r zadd myzset 3 x 4 y 5 z -# r zrem myzset x -# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myzset zrem} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: hash events test" { -# r config set notify-keyspace-events Kh -# r del myhash -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r hmset myhash yes 1 no 0 -# r hincrby myhash yes 10 -# assert_equal {pmessage * __keyspace@9__:myhash hset} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myhash hincrby} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: expired events (triggered expire)" { -# r config set notify-keyspace-events Ex -# r del foo -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r psetex foo 100 1 -# wait_for_condition 50 100 { -# [r exists foo] == 0 -# } else { -# fail "Key does not expire?!" 
-# } -# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: expired events (background expire)" { -# r config set notify-keyspace-events Ex -# r del foo -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r psetex foo 100 1 -# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: evicted events" { -# r config set notify-keyspace-events Ee -# r config set maxmemory-policy allkeys-lru -# r flushdb -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r config set maxmemory 1 -# assert_equal {pmessage * __keyevent@9__:evicted foo} [$rd1 read] -# r config set maxmemory 0 -# $rd1 close -# } -# -# test "Keyspace notifications: test CONFIG GET/SET of event flags" { -# r config set notify-keyspace-events gKE -# assert_equal {gKE} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events {$lshzxeKE} -# assert_equal {$lshzxeKE} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events KA -# assert_equal {AK} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events EA -# assert_equal {AE} [lindex [r config get notify-keyspace-events] 1] -# } -#} diff --git a/tests/unit/quit.tcl b/tests/unit/quit.tcl deleted file mode 100644 index 090fe54ce..000000000 --- a/tests/unit/quit.tcl +++ /dev/null @@ -1,40 +0,0 @@ -start_server {tags {"quit"}} { - proc format_command {args} { - set cmd "*[llength $args]\r\n" - foreach a $args { - append cmd "$[string length $a]\r\n$a\r\n" - } - set _ $cmd - } - - test "QUIT returns OK" { - reconnect - assert_equal OK [r quit] - assert_error * {r ping} - } - -# test "Pipelined commands after QUIT must not be executed" { -# reconnect -# r write [format_command quit] -# r write [format_command set foo bar] -# r flush -# assert_equal OK [r read] -# assert_error * {r read} - -# reconnect -# assert_equal {} [r get foo] -# } - -# test "Pipelined commands after QUIT that exceed read buffer size" { -# reconnect -# r write [format_command quit] -# r write [format_command set foo [string repeat "x" 1024]] -# r flush -# assert_equal OK [r read] -# assert_error * {r read} -# -# reconnect -# assert_equal {} [r get foo] -# -# } -} diff --git a/tests/unit/scan.tcl b/tests/unit/scan.tcl deleted file mode 100644 index ed2b41bdb..000000000 --- a/tests/unit/scan.tcl +++ /dev/null @@ -1,286 +0,0 @@ -start_server {tags {"scan"}} { - test "SCAN basic" { - r flushdb - populate 1000 - #populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 1000 [llength $keys] - } - - test "SCAN COUNT" { - r flushdb - populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur count 5] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 1000 [llength $keys] - } - - test "SCAN MATCH" { - r flushdb - populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur match "key:1??"] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 100 [llength $keys] - } - - test "SCAN TYPE" { - r flushdb - # populate only creates strings - populate 1000 - - # 
Check non-strings are excluded - set cur 0 - set keys {} - while 1 { - set res [r scan $cur type "list"] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - assert_equal 0 [llength $keys] - - # Check strings are included - set cur 0 - set keys {} - while 1 { - set res [r scan $cur type "string"] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - assert_equal 1000 [llength $keys] - - # Check all three args work together - set cur 0 - set keys {} - while 1 { - set res [r scan $cur type "string" match "key:*" count 10] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - assert_equal 1000 [llength $keys] - } - - foreach enc {intset hashtable} { - test "SSCAN with encoding $enc" { - # Create the Set - r del set - if {$enc eq {intset}} { - set prefix "" - } else { - set prefix "ele:" - } - set elements {} - for {set j 0} {$j < 100} {incr j} { - lappend elements ${prefix}${j} - } - r sadd set {*}$elements - - # Verify that the encoding matches. - # assert {[r object encoding set] eq $enc} - - # Test SSCAN - set cur 0 - set keys {} - while 1 { - set res [r sscan set $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 100 [llength $keys] - } - } - - foreach enc {ziplist hashtable} { - test "HSCAN with encoding $enc" { - # Create the Hash - r del hash - if {$enc eq {ziplist}} { - set count 30 - } else { - set count 1000 - } - set elements {} - for {set j 0} {$j < $count} {incr j} { - lappend elements key:$j $j - } - r hmset hash {*}$elements - - # Verify that the encoding matches. - # assert {[r object encoding hash] eq $enc} - - # Test HSCAN - set cur 0 - set keys {} - while 1 { - set res [r hscan hash $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys2 {} - foreach {k v} $keys { - assert {$k eq "key:$v"} - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal $count [llength $keys2] - } - } - - foreach enc {ziplist skiplist} { - test "ZSCAN with encoding $enc" { - # Create the Sorted Set - r del zset - if {$enc eq {ziplist}} { - set count 30 - } else { - set count 1000 - } - set elements {} - for {set j 0} {$j < $count} {incr j} { - lappend elements $j key:$j - } - r zadd zset {*}$elements - - # Verify that the encoding matches. - # - # assert {[r object encoding zset] eq $enc} - - # Test ZSCAN - set cur 0 - set keys {} - while 1 { - set res [r zscan zset $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys2 {} - foreach {k v} $keys { - assert {$k eq "key:$v"} - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal $count [llength $keys2] - } - } - - test "SCAN guarantees check under write load" { - r flushdb - populate 100 - - # We start scanning here, so keys from 0 to 99 should all be - # reported at the end of the iteration. - set keys {} - while 1 { - set res [r scan $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - # Write 10 random keys at every SCAN iteration. 
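# SCAN only guarantees that keys present for the whole iteration are reported
# at least once; keys created while the cursor advances (the "addedkey:<n>"
# writes below) may or may not show up. The check at the end therefore filters
# out the longer added-key names before asserting that all 100 pre-existing
# keys were seen. Note that $cur is not re-initialised in this test; as
# written, the loop appears to rely on the preceding test having left the
# cursor at 0.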
- for {set j 0} {$j < 10} {incr j} { - r set addedkey:[randomInt 1000] foo - } - } - - set keys2 {} - foreach k $keys { - if {[string length $k] > 6} continue - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal 100 [llength $keys2] - } - -# test "SSCAN with integer encoded object (issue #1345)" { -# set objects {1 a} -# r del set -# r sadd set {*}$objects -# set res [r sscan set 0 MATCH *a* COUNT 100] -# assert_equal [lsort -unique [lindex $res 1]] {a} -# set res [r sscan set 0 MATCH *1* COUNT 100] -# assert_equal [lsort -unique [lindex $res 1]] {1} -# } - - test "SSCAN with PATTERN" { - r del mykey - r sadd mykey foo fab fiz foobar 1 2 3 4 - set res [r sscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } {foo foobar} - - test "HSCAN with PATTERN" { - r del mykey - r hmset mykey foo 1 fab 2 fiz 3 foobar 10 1 a 2 b 3 c 4 d - set res [r hscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } {1 10 foo foobar} - - test "ZSCAN with PATTERN" { - r del mykey - r zadd mykey 1 foo 2 fab 3 fiz 10 foobar - set res [r zscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } - - test "ZSCAN scores: regression test for issue #2175" { - r del mykey - for {set j 0} {$j < 500} {incr j} { - r zadd mykey 9.8813129168249309e-323 $j - } - set res [lindex [r zscan mykey 0] 1] - set first_score [lindex $res 1] - assert {$first_score != 0} - } -} diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl deleted file mode 100644 index e1cd2174b..000000000 --- a/tests/unit/scripting.tcl +++ /dev/null @@ -1,606 +0,0 @@ -start_server {tags {"scripting"}} { - test {EVAL - Does Lua interpreter replies to our requests?} { - r eval {return 'hello'} 0 - } {hello} - - test {EVAL - Lua integer -> Redis protocol type conversion} { - r eval {return 100.5} 0 - } {100} - - test {EVAL - Lua string -> Redis protocol type conversion} { - r eval {return 'hello world'} 0 - } {hello world} - - test {EVAL - Lua true boolean -> Redis protocol type conversion} { - r eval {return true} 0 - } {1} - - test {EVAL - Lua false boolean -> Redis protocol type conversion} { - r eval {return false} 0 - } {} - - test {EVAL - Lua status code reply -> Redis protocol type conversion} { - r eval {return {ok='fine'}} 0 - } {fine} - - test {EVAL - Lua error reply -> Redis protocol type conversion} { - catch { - r eval {return {err='this is an error'}} 0 - } e - set _ $e - } {this is an error} - - test {EVAL - Lua table -> Redis protocol type conversion} { - r eval {return {1,2,3,'ciao',{1,2}}} 0 - } {1 2 3 ciao {1 2}} - - test {EVAL - Are the KEYS and ARGV arrays populated correctly?} { - r eval {return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}} 2 a b c d - } {a b c d} - - test {EVAL - is Lua able to call Redis API?} { - r set mykey myval - r eval {return redis.call('get',KEYS[1])} 1 mykey - } {myval} - - test {EVALSHA - Can we call a SHA1 if already defined?} { - r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey - } {myval} - - test {EVALSHA - Can we call a SHA1 in uppercase?} { - r evalsha FD758D1589D044DD850A6F05D52F2EEFD27F033F 1 mykey - } {myval} - - test {EVALSHA - Do we get an error on invalid SHA1?} { - catch {r evalsha NotValidShaSUM 0} e - set _ $e - } {NOSCRIPT*} - - test {EVALSHA - Do we get an error on non defined SHA1?} { - catch {r evalsha ffd632c7d33e571e9f24556ebed26c3479a87130 0} e - set _ $e - } {NOSCRIPT*} - - test {EVAL - Redis integer -> Lua type conversion} { - r eval { - local foo = redis.pcall('incr','x') - return {type(foo),foo} - } 0 - } 
{number 1} - - test {EVAL - Redis bulk -> Lua type conversion} { - r set mykey myval - r eval { - local foo = redis.pcall('get','mykey') - return {type(foo),foo} - } 0 - } {string myval} - - test {EVAL - Redis multi bulk -> Lua type conversion} { - r del mylist - r rpush mylist a - r rpush mylist b - r rpush mylist c - r eval { - local foo = redis.pcall('lrange','mylist',0,-1) - return {type(foo),foo[1],foo[2],foo[3],# foo} - } 0 - } {table a b c 3} - - test {EVAL - Redis status reply -> Lua type conversion} { - r eval { - local foo = redis.pcall('set','mykey','myval') - return {type(foo),foo['ok']} - } 0 - } {table OK} - - test {EVAL - Redis error reply -> Lua type conversion} { - r set mykey myval - r eval { - local foo = redis.pcall('incr','mykey') - return {type(foo),foo['err']} - } 0 - } {table {ERR value is not an integer or out of range}} - - test {EVAL - Redis nil bulk reply -> Lua type conversion} { - r del mykey - r eval { - local foo = redis.pcall('get','mykey') - return {type(foo),foo == false} - } 0 - } {boolean 1} - - test {EVAL - Is the Lua client using the currently selected DB?} { - r set mykey "this is DB 9" - r select 10 - r set mykey "this is DB 10" - r eval {return redis.pcall('get','mykey')} 0 - } {this is DB 10} - - test {EVAL - SELECT inside Lua should not affect the caller} { - # here we DB 10 is selected - r set mykey "original value" - r eval {return redis.pcall('select','9')} 0 - set res [r get mykey] - r select 9 - set res - } {original value} - - if 0 { - test {EVAL - Script can't run more than configured time limit} { - r config set lua-time-limit 1 - catch { - r eval { - local i = 0 - while true do i=i+1 end - } 0 - } e - set _ $e - } {*execution time*} - } - - test {EVAL - Scripts can't run certain commands} { - set e {} - catch {r eval {return redis.pcall('spop','x')} 0} e - set e - } {*not allowed*} - - test {EVAL - Scripts can't run certain commands} { - set e {} - catch { - r eval "redis.pcall('randomkey'); return redis.pcall('set','x','ciao')" 0 - } e - set e - } {*not allowed after*} - - test {EVAL - No arguments to redis.call/pcall is considered an error} { - set e {} - catch {r eval {return redis.call()} 0} e - set e - } {*one argument*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - catch { - r eval "redis.call('nosuchcommand')" 0 - } e - set e - } {*Unknown Redis*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - catch { - r eval "redis.call('get','a','b','c')" 0 - } e - set e - } {*number of args*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - r set foo bar - catch { - r eval {redis.call('lpush',KEYS[1],'val')} 1 foo - } e - set e - } {*against a key*} - - test {EVAL - JSON numeric decoding} { - # We must return the table as a string because otherwise - # Redis converts floats to ints and we get 0 and 1023 instead - # of 0.0003 and 1023.2 as the parsed output. - r eval {return - table.concat( - cjson.decode( - "[0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10]"), " ") - } 0 - } {0 -5000 -1 0.0003 1023.2 0} - - test {EVAL - JSON string decoding} { - r eval {local decoded = cjson.decode('{"keya": "a", "keyb": "b"}') - return {decoded.keya, decoded.keyb} - } 0 - } {a b} - - test {EVAL - cmsgpack can pack double?} { - r eval {local encoded = cmsgpack.pack(0.1) - local h = "" - for i = 1, #encoded do - h = h .. 
string.format("%02x",string.byte(encoded,i)) - end - return h - } 0 - } {cb3fb999999999999a} - - test {EVAL - cmsgpack can pack negative int64?} { - r eval {local encoded = cmsgpack.pack(-1099511627776) - local h = "" - for i = 1, #encoded do - h = h .. string.format("%02x",string.byte(encoded,i)) - end - return h - } 0 - } {d3ffffff0000000000} - - test {EVAL - cmsgpack can pack and unpack circular references?} { - r eval {local a = {x=nil,y=5} - local b = {x=a} - a['x'] = b - local encoded = cmsgpack.pack(a) - local h = "" - -- cmsgpack encodes to a depth of 16, but can't encode - -- references, so the encoded object has a deep copy recusive - -- depth of 16. - for i = 1, #encoded do - h = h .. string.format("%02x",string.byte(encoded,i)) - end - -- when unpacked, re.x.x != re because the unpack creates - -- individual tables down to a depth of 16. - -- (that's why the encoded output is so large) - local re = cmsgpack.unpack(encoded) - assert(re) - assert(re.x) - assert(re.x.x.y == re.y) - assert(re.x.x.x.x.y == re.y) - assert(re.x.x.x.x.x.x.y == re.y) - assert(re.x.x.x.x.x.x.x.x.x.x.y == re.y) - -- maximum working depth: - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.y == re.y) - -- now the last x would be b above and has no y - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x) - -- so, the final x.x is at the depth limit and was assigned nil - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x == nil) - return {h, re.x.x.x.x.x.x.x.x.y == re.y, re.y == 5} - } 0 - } {82a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a178c0 1 1} - - test {EVAL - Numerical sanity check from bitop} { - r eval {assert(0x7fffffff == 2147483647, "broken hex literals"); - assert(0xffffffff == -1 or 0xffffffff == 2^32-1, - "broken hex literals"); - assert(tostring(-1) == "-1", "broken tostring()"); - assert(tostring(0xffffffff) == "-1" or - tostring(0xffffffff) == "4294967295", - "broken tostring()") - } 0 - } {} - - test {EVAL - Verify minimal bitop functionality} { - r eval {assert(bit.tobit(1) == 1); - assert(bit.band(1) == 1); - assert(bit.bxor(1,2) == 3); - assert(bit.bor(1,2,4,8,16,32,64,128) == 255) - } 0 - } {} - - test {SCRIPTING FLUSH - is able to clear the scripts cache?} { - r set mykey myval - set v [r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey] - assert_equal $v myval - set e "" - r script flush - catch {r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey} e - set e - } {NOSCRIPT*} - - test {SCRIPT EXISTS - can detect already defined scripts?} { - r eval "return 1+1" 0 - r script exists a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bd9 a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bda - } {1 0} - - test {SCRIPT LOAD - is able to register scripts in the scripting cache} { - list \ - [r script load "return 'loaded'"] \ - [r evalsha b534286061d4b9e4026607613b95c06c06015ae8 0] - } {b534286061d4b9e4026607613b95c06c06015ae8 loaded} - - test "In the context of Lua the output of random commands gets ordered" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r eval {return redis.call('smembers',KEYS[1])} 1 myset - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT is normally not alpha re-ordered for the scripting engine" { - r del myset - r sadd myset 1 2 3 4 10 - r eval {return redis.call('sort',KEYS[1],'desc')} 1 myset - } {10 4 3 2 1} - - test "SORT BY output gets ordered for scripting" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r eval 
{return redis.call('sort',KEYS[1],'by','_')} 1 myset - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT BY with GET gets ordered for scripting" { - r del myset - r sadd myset a b c - r eval {return redis.call('sort',KEYS[1],'by','_','get','#','get','_:*')} 1 myset - } {a {} b {} c {}} - - test "redis.sha1hex() implementation" { - list [r eval {return redis.sha1hex('')} 0] \ - [r eval {return redis.sha1hex('Pizza & Mandolino')} 0] - } {da39a3ee5e6b4b0d3255bfef95601890afd80709 74822d82031af7493c20eefa13bd07ec4fada82f} - - test {Globals protection reading an undeclared global variable} { - catch {r eval {return a} 0} e - set e - } {*ERR*attempted to access unexisting global*} - - test {Globals protection setting an undeclared global*} { - catch {r eval {a=10} 0} e - set e - } {*ERR*attempted to create global*} - - test {Test an example script DECR_IF_GT} { - set decr_if_gt { - local current - - current = redis.call('get',KEYS[1]) - if not current then return nil end - if current > ARGV[1] then - return redis.call('decr',KEYS[1]) - else - return redis.call('get',KEYS[1]) - end - } - r set foo 5 - set res {} - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - set res - } {4 3 2 2 2} - - test {Scripting engine resets PRNG at every script execution} { - set rand1 [r eval {return tostring(math.random())} 0] - set rand2 [r eval {return tostring(math.random())} 0] - assert_equal $rand1 $rand2 - } - - test {Scripting engine PRNG can be seeded correctly} { - set rand1 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 10] - set rand2 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 10] - set rand3 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 20] - assert_equal $rand1 $rand2 - assert {$rand2 ne $rand3} - } - - test {EVAL does not leak in the Lua stack} { - r set x 0 - # Use a non blocking client to speedup the loop. - set rd [redis_deferring_client] - for {set j 0} {$j < 10000} {incr j} { - $rd eval {return redis.call("incr",KEYS[1])} 1 x - } - for {set j 0} {$j < 10000} {incr j} { - $rd read - } - assert {[s used_memory_lua] < 1024*100} - $rd close - r get x - } {10000} - - test {EVAL processes writes from AOF in read-only slaves} { - r flushall - r config set appendonly yes - r eval {redis.call("set",KEYS[1],"100")} 1 foo - r eval {redis.call("incr",KEYS[1])} 1 foo - r eval {redis.call("incr",KEYS[1])} 1 foo - wait_for_condition 50 100 { - [s aof_rewrite_in_progress] == 0 - } else { - fail "AOF rewrite can't complete after CONFIG SET appendonly yes." 
- } - r config set slave-read-only yes - r slaveof 127.0.0.1 0 - r debug loadaof - set res [r get foo] - r slaveof no one - set res - } {102} - - test {We can call scripts rewriting client->argv from Lua} { - r del myset - r sadd myset a b c - r mset a 1 b 2 c 3 d 4 - assert {[r spop myset] ne {}} - assert {[r spop myset] ne {}} - assert {[r spop myset] ne {}} - assert {[r mget a b c d] eq {1 2 3 4}} - assert {[r spop myset] eq {}} - } - - test {Call Redis command with many args from Lua (issue #1764)} { - r eval { - local i - local x={} - redis.call('del','mylist') - for i=1,100 do - table.insert(x,i) - end - redis.call('rpush','mylist',unpack(x)) - return redis.call('lrange','mylist',0,-1) - } 0 - } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100} - - test {Number conversion precision test (issue #1118)} { - r eval { - local value = 9007199254740991 - redis.call("set","foo",value) - return redis.call("get","foo") - } 0 - } {9007199254740991} - - test {String containing number precision test (regression of issue #1118)} { - r eval { - redis.call("set", "key", "12039611435714932082") - return redis.call("get", "key") - } 0 - } {12039611435714932082} - - test {Verify negative arg count is error instead of crash (issue #1842)} { - catch { r eval { return "hello" } -12 } e - set e - } {ERR Number of keys can't be negative} - - test {Correct handling of reused argv (issue #1939)} { - r eval { - for i = 0, 10 do - redis.call('SET', 'a', '1') - redis.call('MGET', 'a', 'b', 'c') - redis.call('EXPIRE', 'a', 0) - redis.call('GET', 'a') - redis.call('MGET', 'a', 'b', 'c') - end - } 0 - } -} - -# Start a new server since the last test in this stanza will kill the -# instance at all. -start_server {tags {"scripting"}} { - test {Timedout read-only scripts can be killed by SCRIPT KILL} { - set rd [redis_deferring_client] - r config set lua-time-limit 10 - $rd eval {while true do end} 0 - after 200 - catch {r ping} e - assert_match {BUSY*} $e - r script kill - after 200 ; # Give some time to Lua to call the hook again... - assert_equal [r ping] "PONG" - } - - test {Timedout script link is still usable after Lua returns} { - r config set lua-time-limit 10 - r eval {for i=1,100000 do redis.call('ping') end return 'ok'} 0 - r ping - } {PONG} - - test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { - set rd [redis_deferring_client] - r config set lua-time-limit 10 - $rd eval {redis.call('set',KEYS[1],'y'); while true do end} 1 x - after 200 - catch {r ping} e - assert_match {BUSY*} $e - catch {r script kill} e - assert_match {UNKILLABLE*} $e - catch {r ping} e - assert_match {BUSY*} $e - } - - # Note: keep this test at the end of this server stanza because it - # kills the server. - test {SHUTDOWN NOSAVE can kill a timedout script anyway} { - # The server sould be still unresponding to normal commands. - catch {r ping} e - assert_match {BUSY*} $e - catch {r shutdown nosave} - # Make sure the server was killed - catch {set rd [redis_deferring_client]} e - assert_match {*connection refused*} $e - } -} - -start_server {tags {"scripting repl"}} { - start_server {} { - test {Before the slave connects we issue two EVAL commands} { - # One with an error, but still executing a command. 
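# The first script below performs its INCR via redis.call before hitting the
# unknown command, so that write takes effect even though the script itself
# returns an error; the second, correct script then brings x to 2, which is
# the value the test expects.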
- # SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876 - catch { - r eval {redis.call('incr',KEYS[1]); redis.call('nonexisting')} 1 x - } - # One command is correct: - # SHA is: 6f5ade10a69975e903c6d07b10ea44c6382381a5 - r eval {return redis.call('incr',KEYS[1])} 1 x - } {2} - - test {Connect a slave to the main instance} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 role] eq {slave} && - [string match {*master_link_status:up*} [r -1 info replication]] - } else { - fail "Can't turn the instance into a slave" - } - } - - test {Now use EVALSHA against the master, with both SHAs} { - # The server should replicate successful and unsuccessful - # commands as EVAL instead of EVALSHA. - catch { - r evalsha 67164fc43fa971f76fd1aaeeaf60c1c178d25876 1 x - } - r evalsha 6f5ade10a69975e903c6d07b10ea44c6382381a5 1 x - } {4} - - test {If EVALSHA was replicated as EVAL, 'x' should be '4'} { - wait_for_condition 50 100 { - [r -1 get x] eq {4} - } else { - fail "Expected 4 in x, but value is '[r -1 get x]'" - } - } - - test {Replication of script multiple pushes to list with BLPOP} { - set rd [redis_deferring_client] - $rd brpop a 0 - r eval { - redis.call("lpush",KEYS[1],"1"); - redis.call("lpush",KEYS[1],"2"); - } 1 a - set res [$rd read] - $rd close - wait_for_condition 50 100 { - [r -1 lrange a 0 -1] eq [r lrange a 0 -1] - } else { - fail "Expected list 'a' in slave and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'" - } - set res - } {a 1} - - test {EVALSHA replication when first call is readonly} { - r del x - r eval {if tonumber(ARGV[1]) > 0 then redis.call('incr', KEYS[1]) end} 1 x 0 - r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 0 - r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 1 - wait_for_condition 50 100 { - [r -1 get x] eq {1} - } else { - fail "Expected 1 in x, but value is '[r -1 get x]'" - } - } - - test {Lua scripts using SELECT are replicated correctly} { - r eval { - redis.call("set","foo1","bar1") - redis.call("select","10") - redis.call("incr","x") - redis.call("select","11") - redis.call("incr","z") - } 0 - r eval { - redis.call("set","foo1","bar1") - redis.call("select","10") - redis.call("incr","x") - redis.call("select","11") - redis.call("incr","z") - } 0 - wait_for_condition 50 100 { - [r -1 debug digest] eq [r debug digest] - } else { - fail "Master-Slave desync after Lua script using SELECT." 
- } - } - } -} diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl deleted file mode 100644 index 9fa20a5c8..000000000 --- a/tests/unit/slowlog.tcl +++ /dev/null @@ -1,70 +0,0 @@ -start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { - test {SLOWLOG - check that it starts with an empty log} { - r slowlog len - } {0} - -# test {SLOWLOG - only logs commands taking more time than specified} { -# r config set slowlog-log-slower-than 100000 -# r ping -# assert_equal [r slowlog len] 0 -# r debug sleep 0.2 -# assert_equal [r slowlog len] 1 -# } - - test {SLOWLOG - max entries is correctly handled} { - r config set slowlog-log-slower-than 0 - r config set slowlog-max-len 10 - for {set i 0} {$i < 100} {incr i} { - r ping - } - r slowlog len - } {10} - - test {SLOWLOG - GET optional argument to limit output len works} { - llength [r slowlog get 5] - } {5} - - test {SLOWLOG - RESET subcommand works} { - r config set slowlog-log-slower-than 100000 - r slowlog reset - r slowlog len - } {0} - -# test {SLOWLOG - logged entry sanity check} { -# r debug sleep 0.2 -# set e [lindex [r slowlog get] 0] -# assert_equal [llength $e] 4 -# assert_equal [lindex $e 0] 105 -# assert_equal [expr {[lindex $e 2] > 100000}] 1 -# assert_equal [lindex $e 3] {debug sleep 0.2} -# } - - test {SLOWLOG - commands with too many arguments are trimmed} { - r config set slowlog-log-slower-than 0 - r slowlog reset - r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 - set e [lindex [r slowlog get] 0] - lindex $e 3 - } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}} - - test {SLOWLOG - too long arguments are trimmed} { - r config set slowlog-log-slower-than 0 - r slowlog reset - set arg [string repeat A 129] - r sadd set foo $arg - set e [lindex [r slowlog get] 0] - lindex $e 3 - } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}} - -# test {SLOWLOG - EXEC is not logged, just executed commands} { -# r config set slowlog-log-slower-than 100000 -# r slowlog reset -# assert_equal [r slowlog len] 0 -# r multi -# r debug sleep 0.2 -# r exec -# assert_equal [r slowlog len] 1 -# set e [lindex [r slowlog get] 0] -# assert_equal [lindex $e 3] {debug sleep 0.2} -# } -} diff --git a/tests/unit/sort.tcl b/tests/unit/sort.tcl deleted file mode 100644 index a25ffeb5c..000000000 --- a/tests/unit/sort.tcl +++ /dev/null @@ -1,311 +0,0 @@ -start_server { - tags {"sort"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 32 - "set-max-intset-entries" 32 - } -} { - proc create_random_dataset {num cmd} { - set tosort {} - set result {} - array set seenrand {} - r del tosort - for {set i 0} {$i < $num} {incr i} { - # Make sure all the weights are different because - # Redis does not use a stable sort but Tcl does. 
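# Keeping every weight unique matters because the expected ordering is
# computed with Tcl's lsort, which is stable, while Redis SORT makes no
# ordering promise for equal weights; duplicate weights could therefore
# produce a spurious mismatch. The seenrand bookkeeping in the loop below
# enforces that uniqueness.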
- while 1 { - randpath { - set rint [expr int(rand()*1000000)] - } { - set rint [expr rand()] - } - if {![info exists seenrand($rint)]} break - } - set seenrand($rint) x - r $cmd tosort $i - r set weight_$i $rint - r hset wobj_$i weight $rint - lappend tosort [list $i $rint] - } - set sorted [lsort -index 1 -real $tosort] - for {set i 0} {$i < $num} {incr i} { - lappend result [lindex $sorted $i 0] - } - set _ $result - } - - foreach {num cmd enc title} { - 16 lpush ziplist "Ziplist" - 1000 lpush linkedlist "Linked list" - 10000 lpush linkedlist "Big Linked list" - 16 sadd intset "Intset" - 1000 sadd hashtable "Hash table" - 10000 sadd hashtable "Big Hash table" - } { - set result [create_random_dataset $num $cmd] - assert_encoding $enc tosort - - test "$title: SORT BY key" { - assert_equal $result [r sort tosort BY weight_*] - } - - test "$title: SORT BY key with limit" { - assert_equal [lrange $result 5 9] [r sort tosort BY weight_* LIMIT 5 5] - } - - test "$title: SORT BY hash field" { - assert_equal $result [r sort tosort BY wobj_*->weight] - } - } - - set result [create_random_dataset 16 lpush] - test "SORT GET #" { - assert_equal [lsort -integer $result] [r sort tosort GET #] - } - - test "SORT GET " { - r del foo - set res [r sort tosort GET foo] - assert_equal 16 [llength $res] - foreach item $res { assert_equal {} $item } - } - - test "SORT GET (key and hash) with sanity check" { - set l1 [r sort tosort GET # GET weight_*] - set l2 [r sort tosort GET # GET wobj_*->weight] - foreach {id1 w1} $l1 {id2 w2} $l2 { - assert_equal $id1 $id2 - assert_equal $w1 [r get weight_$id1] - assert_equal $w2 [r get weight_$id1] - } - } - - test "SORT BY key STORE" { - r sort tosort BY weight_* store sort-res - assert_equal $result [r lrange sort-res 0 -1] - assert_equal 16 [r llen sort-res] - assert_encoding ziplist sort-res - } - - test "SORT BY hash field STORE" { - r sort tosort BY wobj_*->weight store sort-res - assert_equal $result [r lrange sort-res 0 -1] - assert_equal 16 [r llen sort-res] - assert_encoding ziplist sort-res - } - - test "SORT DESC" { - assert_equal [lsort -decreasing -integer $result] [r sort tosort DESC] - } - - test "SORT ALPHA against integer encoded strings" { - r del mylist - r lpush mylist 2 - r lpush mylist 1 - r lpush mylist 3 - r lpush mylist 10 - r sort mylist alpha - } {1 10 2 3} - - test "SORT sorted set" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r sort zset alpha desc - } {e d c b a} - - test "SORT sorted set BY nosort should retain ordering" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r multi - r sort zset by nosort asc - r sort zset by nosort desc - r exec - } {{a c e b d} {d b e c a}} - - test "SORT sorted set BY nosort + LIMIT" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - assert_equal [r sort zset by nosort asc limit 0 1] {a} - assert_equal [r sort zset by nosort desc limit 0 1] {d} - assert_equal [r sort zset by nosort asc limit 0 2] {a c} - assert_equal [r sort zset by nosort desc limit 0 2] {d b} - assert_equal [r sort zset by nosort limit 5 10] {} - assert_equal [r sort zset by nosort limit -10 100] {a c e b d} - } - - test "SORT sorted set BY nosort works as expected from scripts" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r eval { - return {redis.call('sort',KEYS[1],'by','nosort','asc'), - 
redis.call('sort',KEYS[1],'by','nosort','desc')} - } 1 zset - } {{a c e b d} {d b e c a}} - - test "SORT sorted set: +inf and -inf handling" { - r del zset - r zadd zset -100 a - r zadd zset 200 b - r zadd zset -300 c - r zadd zset 1000000 d - r zadd zset +inf max - r zadd zset -inf min - r zrange zset 0 -1 - } {min c a b d max} - - test "SORT regression for issue #19, sorting floats" { - r flushdb - set floats {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15} - foreach x $floats { - r lpush mylist $x - } - assert_equal [lsort -real $floats] [r sort mylist] - } - - test "SORT with STORE returns zero if result is empty (github issue 224)" { - r flushdb - r sort foo store bar - } {0} - - test "SORT with STORE does not create empty lists (github issue 224)" { - r flushdb - r lpush foo bar - r sort foo alpha limit 10 10 store zap - r exists zap - } {0} - - test "SORT with STORE removes key if result is empty (github issue 227)" { - r flushdb - r lpush foo bar - r sort emptylist store foo - r exists foo - } {0} - - test "SORT with BY and STORE should still order output" { - r del myset mylist - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r sort myset alpha by _ store mylist - r lrange mylist 0 -1 - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT will complain with numerical sorting and bad doubles (1)" { - r del myset - r sadd myset 1 2 3 4 not-a-double - set e {} - catch {r sort myset} e - set e - } {*ERR*double*} - - test "SORT will complain with numerical sorting and bad doubles (2)" { - r del myset - r sadd myset 1 2 3 4 - r mset score:1 10 score:2 20 score:3 30 score:4 not-a-double - set e {} - catch {r sort myset by score:*} e - set e - } {*ERR*double*} - - test "SORT BY sub-sorts lexicographically if score is the same" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - foreach ele {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} { - set score:$ele 100 - } - r sort myset by score:* - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT GET with pattern ending with just -> does not get hash field" { - r del mylist - r lpush mylist a - r set x:a-> 100 - r sort mylist by num get x:*-> - } {100} - - test "SORT by nosort retains native order for lists" { - r del testa - r lpush testa 2 1 4 3 5 - r sort testa by nosort - } {5 3 4 1 2} - - test "SORT by nosort plus store retains native order for lists" { - r del testa - r lpush testa 2 1 4 3 5 - r sort testa by nosort store testb - r lrange testb 0 -1 - } {5 3 4 1 2} - - test "SORT by nosort with limit returns based on original list order" { - r sort testa by nosort limit 0 3 store testb - r lrange testb 0 -1 - } {5 3 4} - - tags {"slow"} { - set num 100 - set res [create_random_dataset $num lpush] - - test "SORT speed, $num element list BY key, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY weight_* LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - - test "SORT speed, $num element list BY hash field, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY wobj_*->weight LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - 
flush stdout - } - } - - test "SORT speed, $num element list directly, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - - test "SORT speed, $num element list BY , 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY nokey LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - } -} diff --git a/tests/unit/tcl/aof-race.tcl b/tests/unit/tcl/aof-race.tcl deleted file mode 100644 index 207f20739..000000000 --- a/tests/unit/tcl/aof-race.tcl +++ /dev/null @@ -1,35 +0,0 @@ -set defaults { appendonly {yes} appendfilename {appendonly.aof} } -set server_path [tmpdir server.aof] -set aof_path "$server_path/appendonly.aof" - -proc start_server_aof {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - start_server [list overrides $config] $code -} - -tags {"aof"} { - # Specific test for a regression where internal buffers were not properly - # cleaned after a child responsible for an AOF rewrite exited. This buffer - # was subsequently appended to the new AOF, resulting in duplicate commands. - start_server_aof [list dir $server_path] { - set client [redis [srv host] [srv port]] - set bench [open "|src/redis-benchmark -q -p [srv port] -c 20 -n 20000 incr foo" "r+"] - after 100 - - # Benchmark should be running by now: start background rewrite - $client bgrewriteaof - - # Read until benchmark pipe reaches EOF - while {[string length [read $bench]] > 0} {} - - # Check contents of foo - assert_equal 20000 [$client get foo] - } - - # Restart server to replay AOF - start_server_aof [list dir $server_path] { - set client [redis [srv host] [srv port]] - assert_equal 20000 [$client get foo] - } -} diff --git a/tests/unit/tcl/aof.tcl b/tests/unit/tcl/aof.tcl deleted file mode 100644 index 7ea70943c..000000000 --- a/tests/unit/tcl/aof.tcl +++ /dev/null @@ -1,236 +0,0 @@ -set defaults { appendonly {yes} appendfilename {appendonly.aof} } -set server_path [tmpdir server.aof] -set aof_path "$server_path/appendonly.aof" - -proc append_to_aof {str} { - upvar fp fp - puts -nonewline $fp $str -} - -proc create_aof {code} { - upvar fp fp aof_path aof_path - set fp [open $aof_path w+] - uplevel 1 $code - close $fp -} - -proc start_server_aof {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] - uplevel 1 $code - kill_server $srv -} - -tags {"aof"} { - ## Server can start when aof-load-truncated is set to yes and AOF - ## is truncated, with an incomplete MULTI block. - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [formatCommand multi] - append_to_aof [formatCommand set bar world] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Unfinished MULTI: Server should start if load-truncated is yes" { - assert_equal 1 [is_alive $srv] - } - } - - ## Should also start with truncated AOF without incomplete MULTI block. 
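# For reference, the formatCommand helper (assumed to behave like the
# format_command proc used in the quit tests) emits the raw RESP that
# append_to_aof writes, e.g. for "set foo hello":
#
#   *3\r\n$3\r\nset\r\n$3\r\nfoo\r\n$5\r\nhello\r\n
#
# The "short read" scenarios below build a valid AOF and then drop the last
# byte of the final entry with [string range ... 0 end-1] to simulate a
# write that was cut short.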
- create_aof { - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [string range [formatCommand incr foo] 0 end-1] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Short read: Server should start if load-truncated is yes" { - assert_equal 1 [is_alive $srv] - } - - set client [redis [dict get $srv host] [dict get $srv port]] - - test "Truncated AOF loaded: we expect foo to be equal to 5" { - assert {[$client get foo] eq "5"} - } - - test "Append a new command after loading an incomplete AOF" { - $client incr foo - } - } - - # Now the AOF file is expected to be correct - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Short read + command: Server should start" { - assert_equal 1 [is_alive $srv] - } - - set client [redis [dict get $srv host] [dict get $srv port]] - - test "Truncated AOF loaded: we expect foo to be equal to 6 now" { - assert {[$client get foo] eq "6"} - } - } - - ## Test that the server exits when the AOF contains a format error - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof "!!!" - append_to_aof [formatCommand set foo hello] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Bad format: Server should have logged an error" { - set pattern "*Bad file format reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test the server doesn't start when the AOF contains an unfinished MULTI - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [formatCommand multi] - append_to_aof [formatCommand set bar world] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Unfinished MULTI: Server should have logged an error" { - set pattern "*Unexpected end of file reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test that the server exits when the AOF contains a short read - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [string range [formatCommand set bar world] 0 end-1] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Short read: Server should have logged an error" { - set pattern "*Unexpected end of file reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test that redis-check-aof indeed sees this AOF is not valid - test "Short read: Utility should confirm the AOF is not valid" { - catch { - exec src/redis-check-aof $aof_path - } result - assert_match "*not valid*" $result - } - - test "Short read: Utility should be able to fix the AOF" { - set result [exec src/redis-check-aof --fix $aof_path << "y\n"] - assert_match "*Successfully truncated AOF*" $result 
- } - - ## Test that the server can be started using the truncated AOF - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Fixed AOF: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "Fixed AOF: Keyspace should contain values that were parseable" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - assert_equal "hello" [$client get foo] - assert_equal "" [$client get bar] - } - } - - ## Test that SPOP (that modifies the client's argc/argv) is correctly free'd - create_aof { - append_to_aof [formatCommand sadd set foo] - append_to_aof [formatCommand sadd set bar] - append_to_aof [formatCommand spop set] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "AOF+SPOP: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "AOF+SPOP: Set should have 1 member" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - assert_equal 1 [$client scard set] - } - } - - ## Test that EXPIREAT is loaded correctly - create_aof { - append_to_aof [formatCommand rpush list foo] - append_to_aof [formatCommand expireat list 1000] - append_to_aof [formatCommand rpush list bar] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "AOF+EXPIRE: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "AOF+EXPIRE: List should be empty" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." 
- } - assert_equal 0 [$client llen list] - } - } - - start_server {overrides {appendonly {yes} appendfilename {appendonly.aof}}} { - test {Redis should not try to convert DEL into EXPIREAT for EXPIRE -1} { - r set x 10 - r expire x -1 - } - } -} diff --git a/tests/unit/tcl/convert-zipmap-hash-on-load.tcl b/tests/unit/tcl/convert-zipmap-hash-on-load.tcl deleted file mode 100644 index cf3577f28..000000000 --- a/tests/unit/tcl/convert-zipmap-hash-on-load.tcl +++ /dev/null @@ -1,35 +0,0 @@ -# Copy RDB with zipmap encoded hash to server path -set server_path [tmpdir "server.convert-zipmap-hash-on-load"] - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb"]] { - test "RDB load zipmap hash: converts to ziplist" { - r select 0 - - assert_match "*ziplist*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-entries" 1]] { - test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded" { - r select 0 - - assert_match "*hashtable*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-value" 1]] { - test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded" { - r select 0 - - assert_match "*hashtable*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} diff --git a/tests/unit/tcl/rdb.tcl b/tests/unit/tcl/rdb.tcl deleted file mode 100644 index 71876a6ed..000000000 --- a/tests/unit/tcl/rdb.tcl +++ /dev/null @@ -1,98 +0,0 @@ -set server_path [tmpdir "server.rdb-encoding-test"] - -# Copy RDB with different encodings in server path -exec cp tests/assets/encodings.rdb $server_path - -start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] { - test "RDB encoding loading test" { - r select 0 - csvdump r - } {"compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" -"hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000", -"hash_zipped","hash","a","1","b","2","c","3", -"list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000", -"list_zipped","list","1","2","3","a","b","c","100000","6000000000", -"number","string","10" -"set","set","1","100000","2","3","6000000000","a","b","c", -"set_zipped_1","set","1","2","3","4", -"set_zipped_2","set","100000","200000","300000","400000", -"set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000", -"string","string","Hello World" -"zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000", -"zset_zipped","zset","a","1","b","2","c","3", -} -} - -set server_path [tmpdir "server.rdb-startup-test"] - -start_server [list overrides [list "dir" $server_path]] { - test {Server started empty with non-existing RDB file} { - r 
debug digest - } {0000000000000000000000000000000000000000} - # Save an RDB file, needed for the next test. - r save -} - -start_server [list overrides [list "dir" $server_path]] { - test {Server started empty with empty RDB file} { - r debug digest - } {0000000000000000000000000000000000000000} -} - -# Helper function to start a server and kill it, just to check the error -# logged. -set defaults {} -proc start_server_and_kill_it {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] - uplevel 1 $code - kill_server $srv -} - -# Make the RDB file unreadable -file attributes [file join $server_path dump.rdb] -permissions 0222 - -# Detect root account (it is able to read the file even with 002 perm) -set isroot 0 -catch { - open [file join $server_path dump.rdb] - set isroot 1 -} - -# Now make sure the server aborted with an error -if {!$isroot} { - start_server_and_kill_it [list "dir" $server_path] { - test {Server should not start if RDB file can't be open} { - wait_for_condition 50 100 { - [string match {*Fatal error loading*} \ - [exec tail -n1 < [dict get $srv stdout]]] - } else { - fail "Server started even if RDB was unreadable!" - } - } - } -} - -# Fix permissions of the RDB file. -file attributes [file join $server_path dump.rdb] -permissions 0666 - -# Corrupt its CRC64 checksum. -set filesize [file size [file join $server_path dump.rdb]] -set fd [open [file join $server_path dump.rdb] r+] -fconfigure $fd -translation binary -seek $fd -8 end -puts -nonewline $fd "foobar00"; # Corrupt the checksum -close $fd - -# Now make sure the server aborted with an error -start_server_and_kill_it [list "dir" $server_path] { - test {Server should not start if RDB is corrupted} { - wait_for_condition 50 100 { - [string match {*RDB checksum*} \ - [exec tail -n1 < [dict get $srv stdout]]] - } else { - fail "Server started even if RDB was corrupted!" 
- } - } -} diff --git a/tests/unit/tcl/redis-cli.tcl b/tests/unit/tcl/redis-cli.tcl deleted file mode 100644 index 40e4222e3..000000000 --- a/tests/unit/tcl/redis-cli.tcl +++ /dev/null @@ -1,208 +0,0 @@ -start_server {tags {"cli"}} { - proc open_cli {} { - set ::env(TERM) dumb - set fd [open [format "|src/redis-cli -p %d -n 9" [srv port]] "r+"] - fconfigure $fd -buffering none - fconfigure $fd -blocking false - fconfigure $fd -translation binary - assert_equal "redis> " [read_cli $fd] - set _ $fd - } - - proc close_cli {fd} { - close $fd - } - - proc read_cli {fd} { - set buf [read $fd] - while {[string length $buf] == 0} { - # wait some time and try again - after 10 - set buf [read $fd] - } - set _ $buf - } - - proc write_cli {fd buf} { - puts $fd $buf - flush $fd - } - - # Helpers to run tests in interactive mode - proc run_command {fd cmd} { - write_cli $fd $cmd - set lines [split [read_cli $fd] "\n"] - assert_equal "redis> " [lindex $lines end] - join [lrange $lines 0 end-1] "\n" - } - - proc test_interactive_cli {name code} { - set ::env(FAKETTY) 1 - set fd [open_cli] - test "Interactive CLI: $name" $code - close_cli $fd - unset ::env(FAKETTY) - } - - # Helpers to run tests where stdout is not a tty - proc write_tmpfile {contents} { - set tmp [tmpfile "cli"] - set tmpfd [open $tmp "w"] - puts -nonewline $tmpfd $contents - close $tmpfd - set _ $tmp - } - - proc _run_cli {opts args} { - set cmd [format "src/redis-cli -p %d -n 9 $args" [srv port]] - foreach {key value} $opts { - if {$key eq "pipe"} { - set cmd "sh -c \"$value | $cmd\"" - } - if {$key eq "path"} { - set cmd "$cmd < $value" - } - } - - set fd [open "|$cmd" "r"] - fconfigure $fd -buffering none - fconfigure $fd -translation binary - set resp [read $fd 1048576] - close $fd - set _ $resp - } - - proc run_cli {args} { - _run_cli {} {*}$args - } - - proc run_cli_with_input_pipe {cmd args} { - _run_cli [list pipe $cmd] {*}$args - } - - proc run_cli_with_input_file {path args} { - _run_cli [list path $path] {*}$args - } - - proc test_nontty_cli {name code} { - test "Non-interactive non-TTY CLI: $name" $code - } - - # Helpers to run tests where stdout is a tty (fake it) - proc test_tty_cli {name code} { - set ::env(FAKETTY) 1 - test "Non-interactive TTY CLI: $name" $code - unset ::env(FAKETTY) - } - - test_interactive_cli "INFO response should be printed raw" { - set lines [split [run_command $fd info] "\n"] - foreach line $lines { - assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line] - } - } - - test_interactive_cli "Status reply" { - assert_equal "OK" [run_command $fd "set key foo"] - } - - test_interactive_cli "Integer reply" { - assert_equal "(integer) 1" [run_command $fd "incr counter"] - } - - test_interactive_cli "Bulk reply" { - r set key foo - assert_equal "\"foo\"" [run_command $fd "get key"] - } - - test_interactive_cli "Multi-bulk reply" { - r rpush list foo - r rpush list bar - assert_equal "1. \"foo\"\n2. 
\"bar\"" [run_command $fd "lrange list 0 -1"] - } - - test_interactive_cli "Parsing quotes" { - assert_equal "OK" [run_command $fd "set key \"bar\""] - assert_equal "bar" [r get key] - assert_equal "OK" [run_command $fd "set key \" bar \""] - assert_equal " bar " [r get key] - assert_equal "OK" [run_command $fd "set key \"\\\"bar\\\"\""] - assert_equal "\"bar\"" [r get key] - assert_equal "OK" [run_command $fd "set key \"\tbar\t\""] - assert_equal "\tbar\t" [r get key] - - # invalid quotation - assert_equal "Invalid argument(s)" [run_command $fd "get \"\"key"] - assert_equal "Invalid argument(s)" [run_command $fd "get \"key\"x"] - - # quotes after the argument are weird, but should be allowed - assert_equal "OK" [run_command $fd "set key\"\" bar"] - assert_equal "bar" [r get key] - } - - test_tty_cli "Status reply" { - assert_equal "OK\n" [run_cli set key bar] - assert_equal "bar" [r get key] - } - - test_tty_cli "Integer reply" { - r del counter - assert_equal "(integer) 1\n" [run_cli incr counter] - } - - test_tty_cli "Bulk reply" { - r set key "tab\tnewline\n" - assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key] - } - - test_tty_cli "Multi-bulk reply" { - r del list - r rpush list foo - r rpush list bar - assert_equal "1. \"foo\"\n2. \"bar\"\n" [run_cli lrange list 0 -1] - } - - test_tty_cli "Read last argument from pipe" { - assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key] - assert_equal "foo\n" [r get key] - } - - test_tty_cli "Read last argument from file" { - set tmpfile [write_tmpfile "from file"] - assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key] - assert_equal "from file" [r get key] - } - - test_nontty_cli "Status reply" { - assert_equal "OK" [run_cli set key bar] - assert_equal "bar" [r get key] - } - - test_nontty_cli "Integer reply" { - r del counter - assert_equal "1" [run_cli incr counter] - } - - test_nontty_cli "Bulk reply" { - r set key "tab\tnewline\n" - assert_equal "tab\tnewline\n" [run_cli get key] - } - - test_nontty_cli "Multi-bulk reply" { - r del list - r rpush list foo - r rpush list bar - assert_equal "foo\nbar" [run_cli lrange list 0 -1] - } - - test_nontty_cli "Read last argument from pipe" { - assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key] - assert_equal "foo\n" [r get key] - } - - test_nontty_cli "Read last argument from file" { - set tmpfile [write_tmpfile "from file"] - assert_equal "OK" [run_cli_with_input_file $tmpfile set key] - assert_equal "from file" [r get key] - } -} diff --git a/tests/unit/tcl/replication-2.tcl b/tests/unit/tcl/replication-2.tcl deleted file mode 100644 index 9446e5cd9..000000000 --- a/tests/unit/tcl/replication-2.tcl +++ /dev/null @@ -1,87 +0,0 @@ -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - after 1000 - s -1 role - } {slave} - - test {If min-slaves-to-write is honored, write is accepted} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r set foo 12345 - wait_for_condition 50 100 { - [r -1 get foo] eq {12345} - } else { - fail "Write did not reached slave" - } - } - - test {No write if min-slaves-to-write is < attached slaves} { - r config set min-slaves-to-write 2 - r config set min-slaves-max-lag 10 - catch {r set foo 12345} err - set err - } {NOREPLICAS*} - - test {If min-slaves-to-write is honored, write is accepted (again)} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r set foo 12345 - wait_for_condition 
50 100 { - [r -1 get foo] eq {12345} - } else { - fail "Write did not reached slave" - } - } - - test {No write if min-slaves-max-lag is > of the slave lag} { - r -1 deferred 1 - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 2 - r -1 debug sleep 6 - assert {[r set foo 12345] eq {OK}} - after 4000 - catch {r set foo 12345} err - assert {[r -1 read] eq {OK}} - r -1 deferred 0 - set err - } {NOREPLICAS*} - - test {min-slaves-to-write is ignored by slaves} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r -1 config set min-slaves-to-write 1 - r -1 config set min-slaves-max-lag 10 - r set foo aaabbb - wait_for_condition 50 100 { - [r -1 get foo] eq {aaabbb} - } else { - fail "Write did not reached slave" - } - } - - # Fix parameters for the next test to work - r config set min-slaves-to-write 0 - r -1 config set min-slaves-to-write 0 - r flushall - - test {MASTER and SLAVE dataset should be identical after complex ops} { - createComplexDataset r 10000 - after 500 - if {[r debug digest] ne [r -1 debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} diff --git a/tests/unit/tcl/replication-3.tcl b/tests/unit/tcl/replication-3.tcl deleted file mode 100644 index 0fcbad45b..000000000 --- a/tests/unit/tcl/replication-3.tcl +++ /dev/null @@ -1,101 +0,0 @@ -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - if {$::accurate} {set numops 50000} else {set numops 5000} - - test {MASTER and SLAVE consistency with expire} { - createComplexDataset r $numops useexpire - after 4000 ;# Make sure everything expired before taking the digest - r keys * ;# Force DEL syntesizing to slave - after 1000 ;# Wait another second. Now everything should be fine. - if {[r debug digest] ne [r -1 debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} - -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - set numops 20000 ;# Enough to trigger the Script Cache LRU eviction. - - # While we are at it, enable AOF to test it will be consistent as well - # after the test. 
- r config set appendonly yes - - test {MASTER and SLAVE consistency with EVALSHA replication} { - array set oldsha {} - for {set j 0} {$j < $numops} {incr j} { - set key "key:$j" - # Make sure to create scripts that have different SHA1s - set script "return redis.call('incr','$key')" - set sha1 [r eval "return redis.sha1hex(\"$script\")" 0] - set oldsha($j) $sha1 - r eval $script 0 - set res [r evalsha $sha1 0] - assert {$res == 2} - # Additionally call one of the old scripts as well, at random. - set res [r evalsha $oldsha([randomInt $j]) 0] - assert {$res > 2} - - # Trigger an AOF rewrite while we are half-way, this also - # forces the flush of the script cache, and we will cover - # more code as a result. - if {$j == $numops / 2} { - catch {r bgrewriteaof} - } - } - - wait_for_condition 50 100 { - [r dbsize] == $numops && - [r -1 dbsize] == $numops && - [r debug digest] eq [r -1 debug digest] - } else { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - - } - - set old_digest [r debug digest] - r config set appendonly no - r debug loadaof - set new_digest [r debug digest] - assert {$old_digest eq $new_digest} - } - } -} diff --git a/tests/unit/tcl/replication-4.tcl b/tests/unit/tcl/replication-4.tcl deleted file mode 100644 index 6db9ffe2b..000000000 --- a/tests/unit/tcl/replication-4.tcl +++ /dev/null @@ -1,136 +0,0 @@ -proc start_bg_complex_data {host port db ops} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & -} - -proc stop_bg_complex_data {handle} { - catch {exec /bin/kill -9 $handle} -} - -start_server {tags {"repl"}} { - start_server {} { - - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] - set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] - set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - after 1000 - s 0 role - } {slave} - - test {Test replication with parallel clients writing in differnet DBs} { - after 5000 - stop_bg_complex_data $load_handle0 - stop_bg_complex_data $load_handle1 - stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} - -start_server {tags {"repl"}} { - start_server {} { - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [s 0 master_link_status] eq {up} - } else { - fail 
"Replication not started." - } - } - - test {With min-slaves-to-write (1,3): master should be writable} { - $master config set min-slaves-max-lag 3 - $master config set min-slaves-to-write 1 - $master set foo bar - } {OK} - - test {With min-slaves-to-write (2,3): master should not be writable} { - $master config set min-slaves-max-lag 3 - $master config set min-slaves-to-write 2 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} - - test {With min-slaves-to-write: master not writable with lagged slave} { - $master config set min-slaves-max-lag 2 - $master config set min-slaves-to-write 1 - assert {[$master set foo bar] eq {OK}} - $slave deferred 1 - $slave debug sleep 6 - after 4000 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} - } -} - -start_server {tags {"repl"}} { - start_server {} { - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [s 0 role] eq {slave} - } else { - fail "Replication not started." - } - } - - test {Replication: commands with many arguments (issue #1221)} { - # We now issue large MSET commands, that may trigger a specific - # class of bugs, see issue #1221. - for {set j 0} {$j < 100} {incr j} { - set cmd [list mset] - for {set x 0} {$x < 1000} {incr x} { - lappend cmd [randomKey] [randomValue] - } - $master {*}$cmd - } - - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - } - } -} diff --git a/tests/unit/tcl/replication-psync.tcl b/tests/unit/tcl/replication-psync.tcl deleted file mode 100644 index f131dafe3..000000000 --- a/tests/unit/tcl/replication-psync.tcl +++ /dev/null @@ -1,115 +0,0 @@ -proc start_bg_complex_data {host port db ops} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & -} - -proc stop_bg_complex_data {handle} { - catch {exec /bin/kill -9 $handle} -} - -# Creates a master-slave pair and breaks the link continuously to force -# partial resyncs attempts, all this while flooding the master with -# write queries. -# -# You can specifiy backlog size, ttl, delay before reconnection, test duration -# in seconds, and an additional condition to verify at the end. -proc test_psync {descr duration backlog_size backlog_ttl delay cond} { - start_server {tags {"repl"}} { - start_server {} { - - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - $master config set repl-backlog-size $backlog_size - $master config set repl-backlog-ttl $backlog_ttl - - set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] - set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] - set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] - - test {Slave should be able to synchronize with the master} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [lindex [r role] 0] eq {slave} && - [lindex [r role] 3] eq {connected} - } else { - fail "Replication not started." - } - } - - # Check that the background clients are actually writing. - test {Detect write load to master} { - wait_for_condition 50 100 { - [$master dbsize] > 100 - } else { - fail "Can't detect write load from background clients." 
- } - } - - test "Test replication partial resync: $descr" { - # Now while the clients are writing data, break the maste-slave - # link multiple times. - for {set j 0} {$j < $duration*10} {incr j} { - after 100 - # catch {puts "MASTER [$master dbsize] keys, SLAVE [$slave dbsize] keys"} - - if {($j % 20) == 0} { - catch { - if {$delay} { - $slave multi - $slave client kill $master_host:$master_port - $slave debug sleep $delay - $slave exec - } else { - $slave client kill $master_host:$master_port - } - } - } - } - stop_bg_complex_data $load_handle0 - stop_bg_complex_data $load_handle1 - stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - eval $cond - } - } - } -} - -test_psync {ok psync} 6 1000000 3600 0 { - assert {[s -1 sync_partial_ok] > 0} -} - -test_psync {no backlog} 6 100 3600 0.5 { - assert {[s -1 sync_partial_err] > 0} -} - -test_psync {ok after delay} 3 100000000 3600 3 { - assert {[s -1 sync_partial_ok] > 0} -} - -test_psync {backlog expired} 3 100000000 1 3 { - assert {[s -1 sync_partial_err] > 0} -} diff --git a/tests/unit/tcl/replication.tcl b/tests/unit/tcl/replication.tcl deleted file mode 100644 index bb907eba8..000000000 --- a/tests/unit/tcl/replication.tcl +++ /dev/null @@ -1,215 +0,0 @@ -start_server {tags {"repl"}} { - set A [srv 0 client] - set A_host [srv 0 host] - set A_port [srv 0 port] - start_server {} { - set B [srv 0 client] - set B_host [srv 0 host] - set B_port [srv 0 port] - - test {Set instance A as slave of B} { - $A slaveof $B_host $B_port - wait_for_condition 50 100 { - [lindex [$A role] 0] eq {slave} && - [string match {*master_link_status:up*} [$A info replication]] - } else { - fail "Can't turn the instance into a slave" - } - } - - test {BRPOPLPUSH replication, when blocking against empty list} { - set rd [redis_deferring_client] - $rd brpoplpush a b 5 - r lpush a foo - wait_for_condition 50 100 { - [$A debug digest] eq [$B debug digest] - } else { - fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" - } - } - - test {BRPOPLPUSH replication, list exists} { - set rd [redis_deferring_client] - r lpush c 1 - r lpush c 2 - r lpush c 3 - $rd brpoplpush c d 5 - after 1000 - assert_equal [$A debug digest] [$B debug digest] - } - - test {BLPOP followed by role change, issue #2473} { - set rd [redis_deferring_client] - $rd blpop foo 0 ; # Block while B is a master - - # Turn B into master of A - $A slaveof no one - $B slaveof $A_host $A_port - wait_for_condition 50 100 { - [lindex [$B role] 0] eq {slave} && - [string match {*master_link_status:up*} [$B info replication]] - } else { - fail "Can't turn the instance into a slave" - } - - # Push elements into the "foo" list of the new slave. - # If the client is still attached to the instance, we'll get - # a desync between the two instances. 
- $A rpush foo a b c - after 100 - - wait_for_condition 50 100 { - [$A debug digest] eq [$B debug digest] && - [$A lrange foo 0 -1] eq {a b c} && - [$B lrange foo 0 -1] eq {a b c} - } else { - fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" - } - } - } -} - -start_server {tags {"repl"}} { - r set mykey foo - - start_server {} { - test {Second server should have role master at first} { - s role - } {master} - - test {SLAVEOF should start with link status "down"} { - r slaveof [srv -1 host] [srv -1 port] - s master_link_status - } {down} - - test {The role should immediately be changed to "slave"} { - s role - } {slave} - - wait_for_sync r - test {Sync should have transferred keys from master} { - r get mykey - } {foo} - - test {The link status should be up} { - s master_link_status - } {up} - - test {SET on the master should immediately propagate} { - r -1 set mykey bar - - wait_for_condition 500 100 { - [r 0 get mykey] eq {bar} - } else { - fail "SET on master did not propagated on slave" - } - } - - test {FLUSHALL should replicate} { - r -1 flushall - if {$::valgrind} {after 2000} - list [r -1 dbsize] [r 0 dbsize] - } {0 0} - - test {ROLE in master reports master with a slave} { - set res [r -1 role] - lassign $res role offset slaves - assert {$role eq {master}} - assert {$offset > 0} - assert {[llength $slaves] == 1} - lassign [lindex $slaves 0] master_host master_port slave_offset - assert {$slave_offset <= $offset} - } - - test {ROLE in slave reports slave in connected state} { - set res [r role] - lassign $res role master_host master_port slave_state slave_offset - assert {$role eq {slave}} - assert {$slave_state eq {connected}} - } - } -} - -foreach dl {no yes} { - start_server {tags {"repl"}} { - set master [srv 0 client] - $master config set repl-diskless-sync $dl - set master_host [srv 0 host] - set master_port [srv 0 port] - set slaves {} - set load_handle0 [start_write_load $master_host $master_port 3] - set load_handle1 [start_write_load $master_host $master_port 5] - set load_handle2 [start_write_load $master_host $master_port 20] - set load_handle3 [start_write_load $master_host $master_port 8] - set load_handle4 [start_write_load $master_host $master_port 4] - start_server {} { - lappend slaves [srv 0 client] - start_server {} { - lappend slaves [srv 0 client] - start_server {} { - lappend slaves [srv 0 client] - test "Connect multiple slaves at the same time (issue #141), diskless=$dl" { - # Send SLAVEOF commands to slaves - [lindex $slaves 0] slaveof $master_host $master_port - [lindex $slaves 1] slaveof $master_host $master_port - [lindex $slaves 2] slaveof $master_host $master_port - - # Wait for all the three slaves to reach the "online" - # state from the POV of the master. - set retry 500 - while {$retry} { - set info [r -3 info] - if {[string match {*slave0:*state=online*slave1:*state=online*slave2:*state=online*} $info]} { - break - } else { - incr retry -1 - after 100 - } - } - if {$retry == 0} { - error "assertion:Slaves not correctly synchronized" - } - - # Wait that slaves acknowledge they are online so - # we are sure that DBSIZE and DEBUG DIGEST will not - # fail because of timing issues. 
- wait_for_condition 500 100 { - [lindex [[lindex $slaves 0] role] 3] eq {connected} && - [lindex [[lindex $slaves 1] role] 3] eq {connected} && - [lindex [[lindex $slaves 2] role] 3] eq {connected} - } else { - fail "Slaves still not connected after some time" - } - - # Stop the write load - stop_write_load $load_handle0 - stop_write_load $load_handle1 - stop_write_load $load_handle2 - stop_write_load $load_handle3 - stop_write_load $load_handle4 - - # Make sure that slaves and master have same - # number of keys - wait_for_condition 500 100 { - [$master dbsize] == [[lindex $slaves 0] dbsize] && - [$master dbsize] == [[lindex $slaves 1] dbsize] && - [$master dbsize] == [[lindex $slaves 2] dbsize] - } else { - fail "Different number of keys between masted and slave after too long time." - } - - # Check digests - set digest [$master debug digest] - set digest0 [[lindex $slaves 0] debug digest] - set digest1 [[lindex $slaves 1] debug digest] - set digest2 [[lindex $slaves 2] debug digest] - assert {$digest ne 0000000000000000000000000000000000000000} - assert {$digest eq $digest0} - assert {$digest eq $digest1} - assert {$digest eq $digest2} - } - } - } - } - } -} diff --git a/tests/unit/type.tcl b/tests/unit/type.tcl deleted file mode 100644 index 2b5b9045a..000000000 --- a/tests/unit/type.tcl +++ /dev/null @@ -1,50 +0,0 @@ -start_server {tags {"type"}} { - - test "type none" { - r flushdb - assert_equal none [r type key] - } - - test "type command" { - r flushdb - - r set key1 key1 - assert_equal string [r type key1] - - r hset key2 key key2 - assert_equal hash [r type key2] - - r lpush key3 key3 - assert_equal list [r type key3] - - r zadd key4 100 key4 - assert_equal zset [r type key4] - - r sadd key5 key5 - assert_equal set [r type key5] - } - - test "ptype none" { - r flushdb - assert_equal {} [r ptype key] - } - - test "ptype command" { - r flushdb - - r set key1 key1 - assert_equal string [r ptype key1] - - r hset key1 key key1 - assert_equal {string hash} [r ptype key1] - - r lpush key1 key1 - assert_equal {string hash list} [r ptype key1] - - r zadd key1 100 key1 - assert_equal {string hash list zset} [r ptype key1] - - r sadd key1 key1 - assert_equal {string hash list zset set} [r ptype key1] - } -} \ No newline at end of file diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl deleted file mode 100644 index e472dabd6..000000000 --- a/tests/unit/type/hash.tcl +++ /dev/null @@ -1,863 +0,0 @@ -start_server {tags {"hash"}} { - test {HSET/HLEN - Small hash creation} { - array set smallhash {} - for {set i 0} {$i < 8} {incr i} { - set key __avoid_collisions__[randstring 0 8 alpha] - set val __avoid_collisions__[randstring 0 8 alpha] - if {[info exists smallhash($key)]} { - incr i -1 - continue - } - r hset smallhash $key $val - set smallhash($key) $val - } - list [r hlen smallhash] - } {8} - -# test {Is the small hash encoded with a listpack?} { -# assert_encoding listpack smallhash -# } - - proc create_hash {key entries} { - r del $key - foreach entry $entries { - r hset $key [lindex $entry 0] [lindex $entry 1] - } - } - - proc get_keys {l} { - set res {} - foreach entry $l { - set key [lindex $entry 0] - lappend res $key - } - return $res - } - -# foreach {type contents} "listpack {{a 1} {b 2} {c 3}} hashtable {{a 1} {b 2} {[randstring 70 90 alpha] 3}}" { -# set original_max_value [lindex [r config get hash-max-ziplist-value] 1] -# r config set hash-max-ziplist-value 10 -# create_hash myhash $contents -# assert_encoding $type myhash -# -# # coverage for objectComputeSize 
-# assert_morethan [memory_usage myhash] 0 -# -# test "HRANDFIELD - $type" { -# unset -nocomplain myhash -# array set myhash {} -# for {set i 0} {$i < 100} {incr i} { -# set key [r hrandfield myhash] -# set myhash($key) 1 -# } -# assert_equal [lsort [get_keys $contents]] [lsort [array names myhash]] -# } -# r config set hash-max-ziplist-value $original_max_value -# } - -# test "HRANDFIELD with RESP3" { -# r hello 3 -# set res [r hrandfield myhash 3 withvalues] -# assert_equal [llength $res] 3 -# assert_equal [llength [lindex $res 1]] 2 -# -# set res [r hrandfield myhash 3] -# assert_equal [llength $res] 3 -# assert_equal [llength [lindex $res 1]] 1 -# r hello 2 -# } - -# test "HRANDFIELD count of 0 is handled correctly" { -# r hrandfield myhash 0 -# } {} - -# test "HRANDFIELD count overflow" { -# r hmset myhash a 1 -# assert_error {*value is out of range*} {r hrandfield myhash -9223372036854770000 withvalues} -# assert_error {*value is out of range*} {r hrandfield myhash -9223372036854775808 withvalues} -# assert_error {*value is out of range*} {r hrandfield myhash -9223372036854775808} -# } {} -# -# test "HRANDFIELD with against non existing key" { -# r hrandfield nonexisting_key 100 -# } {} -# -# # Make sure we can distinguish between an empty array and a null response -# r readraw 1 -# -# test "HRANDFIELD count of 0 is handled correctly - emptyarray" { -# r hrandfield myhash 0 -# } {*0} -# -# test "HRANDFIELD with against non existing key - emptyarray" { -# r hrandfield nonexisting_key 100 -# } {*0} -# -# r readraw 0 -# -# foreach {type contents} " -# hashtable {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {[randstring 70 90 alpha] 10}} -# listpack {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {10 j}} " { -# test "HRANDFIELD with - $type" { -# set original_max_value [lindex [r config get hash-max-ziplist-value] 1] -# r config set hash-max-ziplist-value 10 -# create_hash myhash $contents -# assert_encoding $type myhash -# -# # create a dict for easy lookup -# set mydict [dict create {*}[r hgetall myhash]] -# -# # We'll stress different parts of the code, see the implementation -# # of HRANDFIELD for more information, but basically there are -# # four different code paths. -# -# # PATH 1: Use negative count. -# -# # 1) Check that it returns repeated elements with and without values. -# set res [r hrandfield myhash -20] -# assert_equal [llength $res] 20 -# set res [r hrandfield myhash -1001] -# assert_equal [llength $res] 1001 -# # again with WITHVALUES -# set res [r hrandfield myhash -20 withvalues] -# assert_equal [llength $res] 40 -# set res [r hrandfield myhash -1001 withvalues] -# assert_equal [llength $res] 2002 -# -# # Test random uniform distribution -# # df = 9, 40 means 0.00001 probability -# set res [r hrandfield myhash -1000] -# assert_lessthan [chi_square_value $res] 40 -# -# # 2) Check that all the elements actually belong to the original hash. -# foreach {key val} $res { -# assert {[dict exists $mydict $key]} -# } -# -# # 3) Check that eventually all the elements are returned. 
-# # Use both WITHVALUES and without -# unset -nocomplain auxset -# set iterations 1000 -# while {$iterations != 0} { -# incr iterations -1 -# if {[expr {$iterations % 2}] == 0} { -# set res [r hrandfield myhash -3 withvalues] -# foreach {key val} $res { -# dict append auxset $key $val -# } -# } else { -# set res [r hrandfield myhash -3] -# foreach key $res { -# dict append auxset $key $val -# } -# } -# if {[lsort [dict keys $mydict]] eq -# [lsort [dict keys $auxset]]} { -# break; -# } -# } -# assert {$iterations != 0} -# -# # PATH 2: positive count (unique behavior) with requested size -# # equal or greater than set size. -# foreach size {10 20} { -# set res [r hrandfield myhash $size] -# assert_equal [llength $res] 10 -# assert_equal [lsort $res] [lsort [dict keys $mydict]] -# -# # again with WITHVALUES -# set res [r hrandfield myhash $size withvalues] -# assert_equal [llength $res] 20 -# assert_equal [lsort $res] [lsort $mydict] -# } -# -# # PATH 3: Ask almost as elements as there are in the set. -# # In this case the implementation will duplicate the original -# # set and will remove random elements up to the requested size. -# # -# # PATH 4: Ask a number of elements definitely smaller than -# # the set size. -# # -# # We can test both the code paths just changing the size but -# # using the same code. -# foreach size {8 2} { -# set res [r hrandfield myhash $size] -# assert_equal [llength $res] $size -# # again with WITHVALUES -# set res [r hrandfield myhash $size withvalues] -# assert_equal [llength $res] [expr {$size * 2}] -# -# # 1) Check that all the elements actually belong to the -# # original set. -# foreach ele [dict keys $res] { -# assert {[dict exists $mydict $ele]} -# } -# -# # 2) Check that eventually all the elements are returned. -# # Use both WITHVALUES and without -# unset -nocomplain auxset -# unset -nocomplain allkey -# set iterations [expr {1000 / $size}] -# set all_ele_return false -# while {$iterations != 0} { -# incr iterations -1 -# if {[expr {$iterations % 2}] == 0} { -# set res [r hrandfield myhash $size withvalues] -# foreach {key value} $res { -# dict append auxset $key $value -# lappend allkey $key -# } -# } else { -# set res [r hrandfield myhash $size] -# foreach key $res { -# dict append auxset $key -# lappend allkey $key -# } -# } -# if {[lsort [dict keys $mydict]] eq -# [lsort [dict keys $auxset]]} { -# set all_ele_return true -# } -# } -# assert_equal $all_ele_return true -# # df = 9, 40 means 0.00001 probability -# assert_lessthan [chi_square_value $allkey] 40 -# } -# } -# r config set hash-max-ziplist-value $original_max_value -# } -# -# -# test {HSET/HLEN - Big hash creation} { -# array set bighash {} -# for {set i 0} {$i < 1024} {incr i} { -# set key __avoid_collisions__[randstring 0 8 alpha] -# set val __avoid_collisions__[randstring 0 8 alpha] -# if {[info exists bighash($key)]} { -# incr i -1 -# continue -# } -# r hset bighash $key $val -# set bighash($key) $val -# } -# list [r hlen bighash] -# } {1024} -# -# test {Is the big hash encoded with an hash table?} { -# assert_encoding hashtable bighash -# } - - test {HGET against the small hash} { - set err {} - foreach k [array names smallhash *] { - if {$smallhash($k) ne [r hget smallhash $k]} { - set err "$smallhash($k) != [r hget smallhash $k]" - break - } - } - set _ $err - } {} - - test {HGET against the big hash} { - set err {} - foreach k [array names bighash *] { - if {$bighash($k) ne [r hget bighash $k]} { - set err "$bighash($k) != [r hget bighash $k]" - break - } - } - set _ $err - } {} - 
- test {HGET against non existing key} { - set rv {} - lappend rv [r hget smallhash __123123123__] - lappend rv [r hget bighash __123123123__] - set _ $rv - } {{} {}} - -# test {HSET in update and insert mode} { -# set rv {} -# set k [lindex [array names smallhash *] 0] -# lappend rv [r hset smallhash $k newval1] -# set smallhash($k) newval1 -# lappend rv [r hget smallhash $k] -# lappend rv [r hset smallhash __foobar123__ newval] -# set k [lindex [array names bighash *] 0] -# lappend rv [r hset bighash $k newval2] -# set bighash($k) newval2 -# lappend rv [r hget bighash $k] -# lappend rv [r hset bighash __foobar123__ newval] -# lappend rv [r hdel smallhash __foobar123__] -# lappend rv [r hdel bighash __foobar123__] -# set _ $rv -# } {0 newval1 1 0 newval2 1 1 1} - - test {HSETNX target key missing - small hash} { - r hsetnx smallhash __123123123__ foo - r hget smallhash __123123123__ - } {foo} - - test {HSETNX target key exists - small hash} { - r hsetnx smallhash __123123123__ bar - set result [r hget smallhash __123123123__] - r hdel smallhash __123123123__ - set _ $result - } {foo} - - test {HSETNX target key missing - big hash} { - r hsetnx bighash __123123123__ foo - r hget bighash __123123123__ - } {foo} - - test {HSETNX target key exists - big hash} { - r hsetnx bighash __123123123__ bar - set result [r hget bighash __123123123__] - r hdel bighash __123123123__ - set _ $result - } {foo} - - test {HSET/HMSET wrong number of args} { - assert_error {*wrong number of arguments for 'hset' command} {r hset smallhash key1 val1 key2} - assert_error {*wrong number of arguments for 'hmset' command} {r hmset smallhash key1 val1 key2} - } - - test {HMSET - small hash} { - set args {} - foreach {k v} [array get smallhash] { - set newval [randstring 0 8 alpha] - set smallhash($k) $newval - lappend args $k $newval - } - r hmset smallhash {*}$args - } {OK} - -# test {HMSET - big hash} { -# set args {} -# foreach {k v} [array get bighash] { -# set newval [randstring 0 8 alpha] -# set bighash($k) $newval -# lappend args $k $newval -# } -# r hmset bighash {*}$args -# } {OK} - -# test {HMGET against non existing key and fields} { -# set rv {} -# lappend rv [r hmget doesntexist __123123123__ __456456456__] -# lappend rv [r hmget smallhash __123123123__ __456456456__] -# lappend rv [r hmget bighash __123123123__ __456456456__] -# set _ $rv -# } {{{} {}} {{} {}} {{} {}}} - - test {Hash commands against wrong type} { - r set wrongtype somevalue - assert_error "WRONGTYPE Operation against a key*" {r hmget wrongtype field1 field2} - assert_error "WRONGTYPE Operation against a key*" {r hrandfield wrongtype} - assert_error "WRONGTYPE Operation against a key*" {r hget wrongtype field1} - assert_error "WRONGTYPE Operation against a key*" {r hgetall wrongtype} - assert_error "WRONGTYPE Operation against a key*" {r hdel wrongtype field1} - assert_error "WRONGTYPE Operation against a key*" {r hincrby wrongtype field1 2} - assert_error "WRONGTYPE Operation against a key*" {r hincrbyfloat wrongtype field1 2.5} - assert_error "WRONGTYPE Operation against a key*" {r hstrlen wrongtype field1} - assert_error "WRONGTYPE Operation against a key*" {r hvals wrongtype} - assert_error "WRONGTYPE Operation against a key*" {r hkeys wrongtype} - assert_error "WRONGTYPE Operation against a key*" {r hexists wrongtype field1} - assert_error "WRONGTYPE Operation against a key*" {r hset wrongtype field1 val1} - assert_error "WRONGTYPE Operation against a key*" {r hmset wrongtype field1 val1 field2 val2} - assert_error "WRONGTYPE 
Operation against a key*" {r hsetnx wrongtype field1 val1} - assert_error "WRONGTYPE Operation against a key*" {r hlen wrongtype} - assert_error "WRONGTYPE Operation against a key*" {r hscan wrongtype 0} - } - - test {HMGET - small hash} { - set keys {} - set vals {} - foreach {k v} [array get smallhash] { - lappend keys $k - lappend vals $v - } - set err {} - set result [r hmget smallhash {*}$keys] - if {$vals ne $result} { - set err "$vals != $result" - break - } - set _ $err - } {} - - test {HMGET - big hash} { - set keys {} - set vals {} - foreach {k v} [array get bighash] { - lappend keys $k - lappend vals $v - } - set err {} - set result [r hmget bighash {*}$keys] - if {$vals ne $result} { - set err "$vals != $result" - break - } - set _ $err - } {} - - test {HKEYS - small hash} { - lsort [r hkeys smallhash] - } [lsort [array names smallhash *]] - - test {HKEYS - big hash} { - lsort [r hkeys bighash] - } [lsort [array names bighash *]] - - test {HVALS - small hash} { - set vals {} - foreach {k v} [array get smallhash] { - lappend vals $v - } - set _ [lsort $vals] - } [lsort [r hvals smallhash]] - - test {HVALS - big hash} { - set vals {} - foreach {k v} [array get bighash] { - lappend vals $v - } - set _ [lsort $vals] - } [lsort [r hvals bighash]] - - test {HGETALL - small hash} { - lsort [r hgetall smallhash] - } [lsort [array get smallhash]] - - test {HGETALL - big hash} { - lsort [r hgetall bighash] - } [lsort [array get bighash]] - - test {HGETALL against non-existing key} { - r del htest - r hgetall htest - } {} - - test {HDEL and return value} { - set rv {} - lappend rv [r hdel smallhash nokey] - lappend rv [r hdel bighash nokey] - set k [lindex [array names smallhash *] 0] - lappend rv [r hdel smallhash $k] - lappend rv [r hdel smallhash $k] - lappend rv [r hget smallhash $k] - unset smallhash($k) - set k [lindex [array names bighash *] 0] - lappend rv [r hdel bighash $k] - lappend rv [r hdel bighash $k] - lappend rv [r hget bighash $k] - unset bighash($k) - set _ $rv - } {0 0 1 0 {} 1 0 {}} - - test {HDEL - more than a single value} { - set rv {} - r del myhash - r hmset myhash a 1 b 2 c 3 - assert_equal 0 [r hdel myhash x y] - assert_equal 2 [r hdel myhash a c f] - r hgetall myhash - } {b 2} - - test {HDEL - hash becomes empty before deleting all specified fields} { - r del myhash - r hmset myhash a 1 b 2 c 3 - assert_equal 3 [r hdel myhash a b c d e] - assert_equal 0 [r exists myhash] - } - - test {HEXISTS} { - set rv {} - set k [lindex [array names smallhash *] 0] - lappend rv [r hexists smallhash $k] - lappend rv [r hexists smallhash nokey] - set k [lindex [array names bighash *] 0] - lappend rv [r hexists bighash $k] - lappend rv [r hexists bighash nokey] - } {1 0 1 0} - - test {Is a ziplist encoded Hash promoted on big payload?} { - r hset smallhash foo [string repeat a 1024] - r debug object smallhash - } {*hashtable*} {needs:debug} - - test {HINCRBY against non existing database key} { - r del htest - list [r hincrby htest foo 2] - } {2} - - test {HINCRBY HINCRBYFLOAT against non-integer increment value} { - r del incrhash - r hset incrhash field 5 - assert_error "*value is not an integer*" {r hincrby incrhash field v} - assert_error "*value is not a*" {r hincrbyfloat incrhash field v} - } - - test {HINCRBY against non existing hash key} { - set rv {} - r hdel smallhash tmp - r hdel bighash tmp - lappend rv [r hincrby smallhash tmp 2] - lappend rv [r hget smallhash tmp] - lappend rv [r hincrby bighash tmp 2] - lappend rv [r hget bighash tmp] - } {2 2 2 2} - - test 
{HINCRBY against hash key created by hincrby itself} { - set rv {} - lappend rv [r hincrby smallhash tmp 3] - lappend rv [r hget smallhash tmp] - lappend rv [r hincrby bighash tmp 3] - lappend rv [r hget bighash tmp] - } {5 5 5 5} - - test {HINCRBY against hash key originally set with HSET} { - r hset smallhash tmp 100 - r hset bighash tmp 100 - list [r hincrby smallhash tmp 2] [r hincrby bighash tmp 2] - } {102 102} - - test {HINCRBY over 32bit value} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrby smallhash tmp 1] [r hincrby bighash tmp 1] - } {17179869185 17179869185} - - test {HINCRBY over 32bit value with over 32bit increment} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrby smallhash tmp 17179869184] [r hincrby bighash tmp 17179869184] - } {34359738368 34359738368} - - test {HINCRBY fails against hash value with spaces (left)} { - r hset smallhash str " 11" - r hset bighash str " 11" - catch {r hincrby smallhash str 1} smallerr - catch {r hincrby bighash str 1} bigerr - set rv {} - lappend rv [string match "ERR *not an integer*" $smallerr] - lappend rv [string match "ERR *not an integer*" $bigerr] - } {1 1} - - test {HINCRBY fails against hash value with spaces (right)} { - r hset smallhash str "11 " - r hset bighash str "11 " - catch {r hincrby smallhash str 1} smallerr - catch {r hincrby bighash str 1} bigerr - set rv {} - lappend rv [string match "ERR *not an integer*" $smallerr] - lappend rv [string match "ERR *not an integer*" $bigerr] - } {1 1} - - test {HINCRBY can detect overflows} { - set e {} - r hset hash n -9223372036854775484 - assert {[r hincrby hash n -1] == -9223372036854775485} - catch {r hincrby hash n -10000} e - set e - } {*overflow*} - - test {HINCRBYFLOAT against non existing database key} { - r del htest - list [r hincrbyfloat htest foo 2.5] - } {2.5} - - test {HINCRBYFLOAT against non existing hash key} { - set rv {} - r hdel smallhash tmp - r hdel bighash tmp - lappend rv [roundFloat [r hincrbyfloat smallhash tmp 2.5]] - lappend rv [roundFloat [r hget smallhash tmp]] - lappend rv [roundFloat [r hincrbyfloat bighash tmp 2.5]] - lappend rv [roundFloat [r hget bighash tmp]] - } {2.5 2.5 2.5 2.5} - - test {HINCRBYFLOAT against hash key created by hincrby itself} { - set rv {} - lappend rv [roundFloat [r hincrbyfloat smallhash tmp 3.5]] - lappend rv [roundFloat [r hget smallhash tmp]] - lappend rv [roundFloat [r hincrbyfloat bighash tmp 3.5]] - lappend rv [roundFloat [r hget bighash tmp]] - } {6 6 6 6} - - test {HINCRBYFLOAT against hash key originally set with HSET} { - r hset smallhash tmp 100 - r hset bighash tmp 100 - list [roundFloat [r hincrbyfloat smallhash tmp 2.5]] \ - [roundFloat [r hincrbyfloat bighash tmp 2.5]] - } {102.5 102.5} - - test {HINCRBYFLOAT over 32bit value} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrbyfloat smallhash tmp 1] \ - [r hincrbyfloat bighash tmp 1] - } {17179869185 17179869185} - - test {HINCRBYFLOAT over 32bit value with over 32bit increment} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrbyfloat smallhash tmp 17179869184] \ - [r hincrbyfloat bighash tmp 17179869184] - } {34359738368 34359738368} - - test {HINCRBYFLOAT fails against hash value with spaces (left)} { - r hset smallhash str " 11" - r hset bighash str " 11" - catch {r hincrbyfloat smallhash str 1} smallerr - catch {r hincrbyfloat bighash str 1} bigerr - set rv {} - lappend rv [string match "ERR *not*float*" 
$smallerr] - lappend rv [string match "ERR *not*float*" $bigerr] - } {1 1} - - test {HINCRBYFLOAT fails against hash value with spaces (right)} { - r hset smallhash str "11 " - r hset bighash str "11 " - catch {r hincrbyfloat smallhash str 1} smallerr - catch {r hincrbyfloat bighash str 1} bigerr - set rv {} - lappend rv [string match "ERR *not*float*" $smallerr] - lappend rv [string match "ERR *not*float*" $bigerr] - } {1 1} - - test {HINCRBYFLOAT fails against hash value that contains a null-terminator in the middle} { - r hset h f "1\x002" - catch {r hincrbyfloat h f 1} err - set rv {} - lappend rv [string match "ERR *not*float*" $err] - } {1} - - test {HSTRLEN against the small hash} { - set err {} - foreach k [array names smallhash *] { - if {[string length $smallhash($k)] ne [r hstrlen smallhash $k]} { - set err "[string length $smallhash($k)] != [r hstrlen smallhash $k]" - break - } - } - set _ $err - } {} - - test {HSTRLEN against the big hash} { - set err {} - foreach k [array names bighash *] { - if {[string length $bighash($k)] ne [r hstrlen bighash $k]} { - set err "[string length $bighash($k)] != [r hstrlen bighash $k]" - puts "HSTRLEN and logical length mismatch:" - puts "key: $k" - puts "Logical content: $bighash($k)" - puts "Server content: [r hget bighash $k]" - } - } - set _ $err - } {} - - test {HSTRLEN against non existing field} { - set rv {} - lappend rv [r hstrlen smallhash __123123123__] - lappend rv [r hstrlen bighash __123123123__] - set _ $rv - } {0 0} - - test {HSTRLEN corner cases} { - set vals { - -9223372036854775808 9223372036854775807 9223372036854775808 - {} 0 -1 x - } - foreach v $vals { - r hmset smallhash field $v - r hmset bighash field $v - set len1 [string length $v] - set len2 [r hstrlen smallhash field] - set len3 [r hstrlen bighash field] - assert {$len1 == $len2} - assert {$len2 == $len3} - } - } - - test {HINCRBYFLOAT over hash-max-listpack-value encoded with a listpack} { - set original_max_value [lindex [r config get hash-max-ziplist-value] 1] - r config set hash-max-listpack-value 8 - - # hash's value exceeds hash-max-listpack-value - r del smallhash - r del bighash - r hset smallhash tmp 0 - r hset bighash tmp 0 - r hincrbyfloat smallhash tmp 0.000005 - r hincrbyfloat bighash tmp 0.0000005 - assert_encoding listpack smallhash - assert_encoding hashtable bighash - - # hash's field exceeds hash-max-listpack-value - r del smallhash - r del bighash - r hincrbyfloat smallhash abcdefgh 1 - r hincrbyfloat bighash abcdefghi 1 - assert_encoding listpack smallhash - assert_encoding hashtable bighash - - r config set hash-max-listpack-value $original_max_value - } - - test {Hash ziplist regression test for large keys} { - r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a - r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b - r hget hash 
kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk - } {b} - - foreach size {10 512} { - test "Hash fuzzing #1 - $size fields" { - for {set times 0} {$times < 10} {incr times} { - catch {unset hash} - array set hash {} - r del hash - - # Create - for {set j 0} {$j < $size} {incr j} { - set field [randomValue] - set value [randomValue] - r hset hash $field $value - set hash($field) $value - } - - # Verify - foreach {k v} [array get hash] { - assert_equal $v [r hget hash $k] - } - assert_equal [array size hash] [r hlen hash] - } - } - - test "Hash fuzzing #2 - $size fields" { - for {set times 0} {$times < 10} {incr times} { - catch {unset hash} - array set hash {} - r del hash - - # Create - for {set j 0} {$j < $size} {incr j} { - randpath { - set field [randomValue] - set value [randomValue] - r hset hash $field $value - set hash($field) $value - } { - set field [randomSignedInt 512] - set value [randomSignedInt 512] - r hset hash $field $value - set hash($field) $value - } { - randpath { - set field [randomValue] - } { - set field [randomSignedInt 512] - } - r hdel hash $field - unset -nocomplain hash($field) - } - } - - # Verify - foreach {k v} [array get hash] { - assert_equal $v [r hget hash $k] - } - assert_equal [array size hash] [r hlen hash] - } - } - } - - test {Stress test the hash ziplist -> hashtable encoding conversion} { - r config set hash-max-ziplist-entries 32 - for {set j 0} {$j < 100} {incr j} { - r del myhash - for {set i 0} {$i < 64} {incr i} { - r hset myhash [randomValue] [randomValue] - } - assert_encoding hashtable myhash - } - } - - # The following test can only be executed if we don't use Valgrind, and if - # we are using x86_64 architecture, because: - # - # 1) Valgrind has floating point limitations, no support for 80 bits math. - # 2) Other archs may have the same limits. - # - # 1.23 cannot be represented correctly with 64 bit doubles, so we skip - # the test, since we are only testing pretty printing here and is not - # a bug if the program outputs things like 1.299999... 
- if {!$::valgrind && [string match *x86_64* [exec uname -a]]} { - test {Test HINCRBYFLOAT for correct float representation (issue #2846)} { - r del myhash - assert {[r hincrbyfloat myhash float 1.23] eq {1.23}} - assert {[r hincrbyfloat myhash float 0.77] eq {2}} - assert {[r hincrbyfloat myhash float -0.1] eq {1.9}} - } - } - - test {Hash ziplist of various encodings} { - r del k - config_set hash-max-ziplist-entries 1000000000 - config_set hash-max-ziplist-value 1000000000 - r hset k ZIP_INT_8B 127 - r hset k ZIP_INT_16B 32767 - r hset k ZIP_INT_32B 2147483647 - r hset k ZIP_INT_64B 9223372036854775808 - r hset k ZIP_INT_IMM_MIN 0 - r hset k ZIP_INT_IMM_MAX 12 - r hset k ZIP_STR_06B [string repeat x 31] - r hset k ZIP_STR_14B [string repeat x 8191] - r hset k ZIP_STR_32B [string repeat x 65535] - set k [r hgetall k] - set dump [r dump k] - - # will be converted to dict at RESTORE - config_set hash-max-ziplist-entries 2 - config_set sanitize-dump-payload no mayfail - r restore kk 0 $dump - set kk [r hgetall kk] - - # make sure the values are right - assert_equal [lsort $k] [lsort $kk] - assert_equal [dict get $k ZIP_STR_06B] [string repeat x 31] - set k [dict remove $k ZIP_STR_06B] - assert_equal [dict get $k ZIP_STR_14B] [string repeat x 8191] - set k [dict remove $k ZIP_STR_14B] - assert_equal [dict get $k ZIP_STR_32B] [string repeat x 65535] - set k [dict remove $k ZIP_STR_32B] - set _ $k - } {ZIP_INT_8B 127 ZIP_INT_16B 32767 ZIP_INT_32B 2147483647 ZIP_INT_64B 9223372036854775808 ZIP_INT_IMM_MIN 0 ZIP_INT_IMM_MAX 12} - - test {Hash ziplist of various encodings - sanitize dump} { - config_set sanitize-dump-payload yes mayfail - r restore kk 0 $dump replace - set k [r hgetall k] - set kk [r hgetall kk] - - # make sure the values are right - assert_equal [lsort $k] [lsort $kk] - assert_equal [dict get $k ZIP_STR_06B] [string repeat x 31] - set k [dict remove $k ZIP_STR_06B] - assert_equal [dict get $k ZIP_STR_14B] [string repeat x 8191] - set k [dict remove $k ZIP_STR_14B] - assert_equal [dict get $k ZIP_STR_32B] [string repeat x 65535] - set k [dict remove $k ZIP_STR_32B] - set _ $k - } {ZIP_INT_8B 127 ZIP_INT_16B 32767 ZIP_INT_32B 2147483647 ZIP_INT_64B 9223372036854775808 ZIP_INT_IMM_MIN 0 ZIP_INT_IMM_MAX 12} - - # On some platforms strtold("+inf") with valgrind returns a non-inf result - if {!$::valgrind} { - test {HINCRBYFLOAT does not allow NaN or Infinity} { - assert_error "*value is NaN or Infinity*" {r hincrbyfloat hfoo field +inf} - assert_equal 0 [r exists hfoo] - } - } -} \ No newline at end of file diff --git a/tests/unit/type/list-2.tcl b/tests/unit/type/list-2.tcl deleted file mode 100644 index b54bdc85a..000000000 --- a/tests/unit/type/list-2.tcl +++ /dev/null @@ -1,47 +0,0 @@ -start_server { - tags {"list"} - overrides { - "list-max-ziplist-size" 4 - } -} { - source "tests/unit/type/list-common.tcl" - - foreach {type large} [array get largevalue] { - tags {"slow"} { - test "LTRIM stress testing - $type" { - set mylist {} - set startlen 32 - r del mylist - - # Start with the large value to ensure the - # right encoding is used. 
- r rpush mylist $large - lappend mylist $large - - for {set i 0} {$i < $startlen} {incr i} { - set str [randomInt 9223372036854775807] - r rpush mylist $str - lappend mylist $str - } - - for {set i 0} {$i < 1000} {incr i} { - set min [expr {int(rand()*$startlen)}] - set max [expr {$min+int(rand()*$startlen)}] - set before_len [llength $mylist] - set before_len_r [r llen mylist] - assert_equal $before_len $before_len_r - set mylist [lrange $mylist $min $max] - r ltrim mylist $min $max - assert_equal $mylist [r lrange mylist 0 -1] "failed trim" - - for {set j [r llen mylist]} {$j < $startlen} {incr j} { - set str [randomInt 9223372036854775807] - r rpush mylist $str - lappend mylist $str - assert_equal $mylist [r lrange mylist 0 -1] "failed append match" - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/unit/type/list-3.tcl b/tests/unit/type/list-3.tcl deleted file mode 100644 index eba209f9f..000000000 --- a/tests/unit/type/list-3.tcl +++ /dev/null @@ -1,232 +0,0 @@ -proc generate_cmd_on_list_key {key} { - set op [randomInt 7] - set small_signed_count [expr 5-[randomInt 10]] - if {[randomInt 2] == 0} { - set ele [randomInt 1000] - } else { - set ele [string repeat x [randomInt 10000]][randomInt 1000] - } - switch $op { - 0 {return "lpush $key $ele"} - 1 {return "rpush $key $ele"} - 2 {return "lpop $key"} - 3 {return "rpop $key"} - 4 { - return "lset $key $small_signed_count $ele" - } - 5 { - set otherele [randomInt 1000] - if {[randomInt 2] == 0} { - set where before - } else { - set where after - } - return "linsert $key $where $otherele $ele" - } - 6 { - set otherele "" - catch { - set index [randomInt [r llen $key]] - set otherele [r lindex $key $index] - } - return "lrem $key 1 $otherele" - } - } -} - -start_server { - tags {"list ziplist"} - overrides { - "list-max-ziplist-size" 16 - } -} { - test {Explicit regression for a list bug} { - set mylist {49376042582 {BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyotK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}} - r del l - r rpush l [lindex $mylist 0] - r rpush l [lindex $mylist 1] - assert_equal [r lindex l 0] [lindex $mylist 0] - assert_equal [r lindex l 1] [lindex $mylist 1] - } - - test {Regression for quicklist #3343 bug} { - r del mylist - r lpush mylist 401 - r lpush mylist 392 - r rpush mylist [string repeat x 5105]"799" - r lset mylist -1 [string repeat x 1014]"702" - r lpop mylist - r lset mylist -1 [string repeat x 4149]"852" - r linsert mylist before 401 [string repeat x 9927]"12" - r lrange mylist 0 -1 - r ping ; # It's enough if the server is still alive - } {PONG} - - test {Check compression with recompress} { - r del key - config_set list-compress-depth 1 - config_set list-max-ziplist-size 16 - r rpush key a - r rpush key [string repeat b 50000] - r rpush key c - r lset key 1 d - r rpop key - r rpush key [string repeat e 5000] - r linsert key before f 1 - r rpush key g - r ping - } - - test {Crash due to wrongly recompress after lrem} { - r del key - config_set list-compress-depth 2 - r lpush key a - r lpush key [string repeat a 5000] - r lpush key [string repeat b 5000] - r lpush key [string repeat c 5000] - r rpush key [string repeat x 10000]"969" - r rpush key b - r lrem key 1 a - r rpop key - r lrem key 1 [string repeat x 10000]"969" - r rpush key crash - r ping - } - - test {LINSERT correctly recompress full quicklistNode after inserting a element before it} { - r del key - config_set list-compress-depth 1 - r rpush key b - r rpush key c - r lset 
key -1 [string repeat x 8192]"969" - r lpush key a - r rpush key d - r linsert key before b f - r rpop key - r ping - } - - test {LINSERT correctly recompress full quicklistNode after inserting a element after it} { - r del key - config_set list-compress-depth 1 - r rpush key b - r rpush key c - r lset key 0 [string repeat x 8192]"969" - r lpush key a - r rpush key d - r linsert key after c f - r lpop key - r ping - } - -foreach comp {2 1 0} { - set cycles 1000 - if {$::accurate} { set cycles 10000 } - config_set list-compress-depth $comp - - test "Stress tester for #3343-alike bugs comp: $comp" { - r del key - set sent {} - for {set j 0} {$j < $cycles} {incr j} { - catch { - set cmd [generate_cmd_on_list_key key] - lappend sent $cmd - - # execute the command, we expect commands to fail on syntax errors - r {*}$cmd - } - } - - set print_commands false - set crash false - if {[catch {r ping}]} { - puts "Server crashed" - set print_commands true - set crash true - } - - if {!$::external} { - # check valgrind and asan report for invalid reads after execute - # command so that we have a report that is easier to reproduce - set valgrind_errors [find_valgrind_errors [srv 0 stderr] false] - set asan_errors [sanitizer_errors_from_file [srv 0 stderr]] - if {$valgrind_errors != "" || $asan_errors != ""} { - puts "valgrind or asan found an issue" - set print_commands true - } - } - - if {$print_commands} { - puts "violating commands:" - foreach cmd $sent { - puts $cmd - } - } - - assert_equal $crash false - } -} ;# foreach comp - - tags {slow} { - test {ziplist implementation: value encoding and backlink} { - if {$::accurate} {set iterations 100} else {set iterations 10} - for {set j 0} {$j < $iterations} {incr j} { - r del l - set l {} - for {set i 0} {$i < 200} {incr i} { - randpath { - set data [string repeat x [randomInt 100000]] - } { - set data [randomInt 65536] - } { - set data [randomInt 4294967296] - } { - set data [randomInt 18446744073709551616] - } { - set data -[randomInt 65536] - if {$data eq {-0}} {set data 0} - } { - set data -[randomInt 4294967296] - if {$data eq {-0}} {set data 0} - } { - set data -[randomInt 18446744073709551616] - if {$data eq {-0}} {set data 0} - } - lappend l $data - r rpush l $data - } - assert_equal [llength $l] [r llen l] - # Traverse backward - for {set i 199} {$i >= 0} {incr i -1} { - if {[lindex $l $i] ne [r lindex l $i]} { - assert_equal [lindex $l $i] [r lindex l $i] - } - } - } - } - - test {ziplist implementation: encoding stress testing} { - for {set j 0} {$j < 200} {incr j} { - r del l - set l {} - set len [randomInt 400] - for {set i 0} {$i < $len} {incr i} { - set rv [randomValue] - randpath { - lappend l $rv - r rpush l $rv - } { - set l [concat [list $rv] $l] - r lpush l $rv - } - } - assert_equal [llength $l] [r llen l] - for {set i 0} {$i < $len} {incr i} { - if {[lindex $l $i] ne [r lindex l $i]} { - assert_equal [lindex $l $i] [r lindex l $i] - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/unit/type/list-common.tcl b/tests/unit/type/list-common.tcl deleted file mode 100644 index ab45f0b31..000000000 --- a/tests/unit/type/list-common.tcl +++ /dev/null @@ -1,5 +0,0 @@ -# We need a value larger than list-max-ziplist-value to make sure -# the list has the right encoding when it is swapped in again. 
-array set largevalue {} -set largevalue(ziplist) "hello" -set largevalue(linkedlist) [string repeat "hello" 4] diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl deleted file mode 100644 index 68eea8f66..000000000 --- a/tests/unit/type/list.tcl +++ /dev/null @@ -1,2431 +0,0 @@ -# check functionality compression of plain and packed nodes -start_server [list overrides [list save ""] ] { - r config set list-compress-depth 2 - r config set list-max-ziplist-size 1 - - # 3 test to check compression with plain and packed nodes - # 1. using push + insert - # 2. using push + insert + trim - # 3. using push + insert + set - - foreach {container size} {packed 500 plain 8193} { - test "$container node check compression with insert and pop" { - r flushdb - r lpush list1 [string repeat a $size] - r lpush list1 [string repeat b $size] - r lpush list1 [string repeat c $size] - r lpush list1 [string repeat d $size] - r linsert list1 after [string repeat d $size] [string repeat e $size] - r linsert list1 after [string repeat d $size] [string repeat f $size] - r linsert list1 after [string repeat d $size] [string repeat g $size] - r linsert list1 after [string repeat d $size] [string repeat j $size] - assert_equal [r lpop list1] [string repeat d $size] - assert_equal [r lpop list1] [string repeat j $size] - assert_equal [r lpop list1] [string repeat g $size] - assert_equal [r lpop list1] [string repeat f $size] - assert_equal [r lpop list1] [string repeat e $size] - assert_equal [r lpop list1] [string repeat c $size] - assert_equal [r lpop list1] [string repeat b $size] - assert_equal [r lpop list1] [string repeat a $size] - }; - - test "$container node check compression combined with trim" { - r flushdb - r lpush list2 [string repeat a $size] - r linsert list2 after [string repeat a $size] [string repeat b $size] - r rpush list2 [string repeat c $size] - assert_equal [string repeat b $size] [r lindex list2 1] - r LTRIM list2 1 -1 - r llen list2 - } {2} - - test {LINSERT against non-list value error} { - r set k1 v1 - assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r linsert k1 after 0 0} - } - - test "$container node check compression with lset" { - r flushdb - r lpush list3 [string repeat a $size] - r LSET list3 0 [string repeat b $size] - assert_equal [string repeat b $size] [r lindex list3 0] - r lpush list3 [string repeat c $size] - r LSET list3 0 [string repeat d $size] - assert_equal [string repeat d $size] [r lindex list3 0] - } - } ;# foreach - - # revert config for external mode tests. 
- r config set list-compress-depth 0 -} - -# check functionality of plain nodes using low packed-threshold -start_server [list overrides [list save ""] ] { -foreach type {listpack quicklist} { - if {$type eq "listpack"} { - r config set list-max-listpack-size -2 - } else { - r config set list-max-listpack-size 1 - } - - # basic command check for plain nodes - "LPUSH & LPOP" - test {Test LPUSH and LPOP on plain nodes} { - r flushdb - r debug quicklist-packed-threshold 1b - r lpush lst 9 - r lpush lst xxxxxxxxxx - r lpush lst xxxxxxxxxx - assert_encoding $type lst - set s0 [s used_memory] - assert {$s0 > 10} - assert {[r llen lst] == 3} - set s0 [r rpop lst] - set s1 [r rpop lst] - assert {$s0 eq "9"} - assert {[r llen lst] == 1} - r lpop lst - assert {[string length $s1] == 10} - # check rdb - r lpush lst xxxxxxxxxx - r lpush lst bb - r debug reload - assert_equal [r rpop lst] "xxxxxxxxxx" - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # basic command check for plain nodes - "LINDEX & LINSERT" - test {Test LINDEX and LINSERT on plain nodes} { - r flushdb - r debug quicklist-packed-threshold 1b - r lpush lst xxxxxxxxxxx - r lpush lst 9 - r lpush lst xxxxxxxxxxx - assert_encoding $type lst - r linsert lst before "9" "8" - assert {[r lindex lst 1] eq "8"} - r linsert lst BEFORE "9" "7" - r linsert lst BEFORE "9" "xxxxxxxxxxx" - assert {[r lindex lst 3] eq "xxxxxxxxxxx"} - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # basic command check for plain nodes - "LTRIM" - test {Test LTRIM on plain nodes} { - r flushdb - r debug quicklist-packed-threshold 1b - r lpush lst1 9 - r lpush lst1 xxxxxxxxxxx - r lpush lst1 9 - assert_encoding $type lst1 - r LTRIM lst1 1 -1 - assert_equal [r llen lst1] 2 - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # basic command check for plain nodes - "LREM" - test {Test LREM on plain nodes} { - r flushdb - r debug quicklist-packed-threshold 1b - r lpush lst one - r lpush lst xxxxxxxxxxx - assert_encoding $type lst - set s0 [s used_memory] - assert {$s0 > 10} - r lpush lst 9 - r LREM lst -2 "one" - assert_equal [r llen lst] 2 - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # basic command check for plain nodes - "LPOS" - test {Test LPOS on plain nodes} { - r flushdb - r debug quicklist-packed-threshold 1b - r RPUSH lst "aa" - r RPUSH lst "bb" - r RPUSH lst "cc" - assert_encoding $type lst - r LSET lst 0 "xxxxxxxxxxx" - assert_equal [r LPOS lst "xxxxxxxxxxx"] 0 - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # basic command check for plain nodes - "LMOVE" - test {Test LMOVE on plain nodes} { - r flushdb - r debug quicklist-packed-threshold 1b - r RPUSH lst2{t} "aa" - r RPUSH lst2{t} "bb" - assert_encoding $type lst2{t} - r LSET lst2{t} 0 xxxxxxxxxxx - r RPUSH lst2{t} "cc" - r RPUSH lst2{t} "dd" - r LMOVE lst2{t} lst{t} RIGHT LEFT - r LMOVE lst2{t} lst{t} LEFT RIGHT - assert_equal [r llen lst{t}] 2 - assert_equal [r llen lst2{t}] 2 - assert_equal [r lpop lst2{t}] "bb" - assert_equal [r lpop lst2{t}] "cc" - assert_equal [r lpop lst{t}] "dd" - assert_equal [r lpop lst{t}] "xxxxxxxxxxx" - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # testing LSET with combinations of node types - # plain->packed , packed->plain, plain->plain, packed->packed - test {Test LSET with packed / plain combinations} { - r debug quicklist-packed-threshold 5b - r RPUSH lst "aa" - r RPUSH lst "bb" - assert_encoding $type lst - r lset lst 0 [string repeat d 50001] - set s1 [r lpop lst] - 
assert_equal $s1 [string repeat d 50001] - r RPUSH lst [string repeat f 50001] - r lset lst 0 [string repeat e 50001] - set s1 [r lpop lst] - assert_equal $s1 [string repeat e 50001] - r RPUSH lst [string repeat m 50001] - r lset lst 0 "bb" - set s1 [r lpop lst] - assert_equal $s1 "bb" - r RPUSH lst "bb" - r lset lst 0 "cc" - set s1 [r lpop lst] - assert_equal $s1 "cc" - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} - - # checking LSET in case ziplist needs to be split - test {Test LSET with packed is split in the middle} { - set original_config [config_get_set list-max-listpack-size 4] - r flushdb - r debug quicklist-packed-threshold 5b - r RPUSH lst "aa" - r RPUSH lst "bb" - r RPUSH lst "cc" - r RPUSH lst "dd" - r RPUSH lst "ee" - assert_encoding quicklist lst - r lset lst 2 [string repeat e 10] - assert_equal [r lpop lst] "aa" - assert_equal [r lpop lst] "bb" - assert_equal [r lpop lst] [string repeat e 10] - assert_equal [r lpop lst] "dd" - assert_equal [r lpop lst] "ee" - r debug quicklist-packed-threshold 0 - r config set list-max-listpack-size $original_config - } {OK} {needs:debug} - - - # repeating "plain check LSET with combinations" - # but now with single item in each ziplist - test {Test LSET with packed consist only one item} { - r flushdb - set original_config [config_get_set list-max-ziplist-size 1] - r debug quicklist-packed-threshold 1b - r RPUSH lst "aa" - r RPUSH lst "bb" - r lset lst 0 [string repeat d 50001] - set s1 [r lpop lst] - assert_equal $s1 [string repeat d 50001] - r RPUSH lst [string repeat f 50001] - r lset lst 0 [string repeat e 50001] - set s1 [r lpop lst] - assert_equal $s1 [string repeat e 50001] - r RPUSH lst [string repeat m 50001] - r lset lst 0 "bb" - set s1 [r lpop lst] - assert_equal $s1 "bb" - r RPUSH lst "bb" - r lset lst 0 "cc" - set s1 [r lpop lst] - assert_equal $s1 "cc" - r debug quicklist-packed-threshold 0 - r config set list-max-ziplist-size $original_config - } {OK} {needs:debug} - - test {Crash due to delete entry from a compress quicklist node} { - r flushdb - r debug quicklist-packed-threshold 100b - set original_config [config_get_set list-compress-depth 1] - - set small_ele [string repeat x 32] - set large_ele [string repeat x 100] - - # Push a large element - r RPUSH lst $large_ele - - # Insert two elements and keep them in the same node - r RPUSH lst $small_ele - r RPUSH lst $small_ele - assert_encoding $type lst - - # When setting the position of -1 to a large element, we first insert - # a large element at the end and then delete its previous element. 
- r LSET lst -1 $large_ele - assert_equal "$large_ele $small_ele $large_ele" [r LRANGE lst 0 -1] - - r debug quicklist-packed-threshold 0 - r config set list-compress-depth $original_config - } {OK} {needs:debug} - - test {Crash due to split quicklist node wrongly} { - r flushdb - r debug quicklist-packed-threshold 10b - - r LPUSH lst "aa" - r LPUSH lst "bb" - assert_encoding $type lst - r LSET lst -2 [string repeat x 10] - r RPOP lst - assert_equal [string repeat x 10] [r LRANGE lst 0 -1] - - r debug quicklist-packed-threshold 0 - } {OK} {needs:debug} -} -} - -run_solo {list-large-memory} { -start_server [list overrides [list save ""] ] { - -# test if the server supports such large configs (avoid 32 bit builds) -catch { - r config set proto-max-bulk-len 10000000000 ;#10gb - r config set client-query-buffer-limit 10000000000 ;#10gb -} -if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} { - - set str_length 5000000000 - - # repeating all the plain nodes basic checks with 5gb values - test {Test LPUSH and LPOP on plain nodes over 4GB} { - r flushdb - r lpush lst 9 - r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" - write_big_bulk $str_length; - r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" - write_big_bulk $str_length; - set s0 [s used_memory] - assert {$s0 > $str_length} - assert {[r llen lst] == 3} - assert_equal [r rpop lst] "9" - assert_equal [read_big_bulk {r rpop lst}] $str_length - assert {[r llen lst] == 1} - assert_equal [read_big_bulk {r rpop lst}] $str_length - } {} {large-memory} - - test {Test LINDEX and LINSERT on plain nodes over 4GB} { - r flushdb - r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" - write_big_bulk $str_length; - r lpush lst 9 - r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" - write_big_bulk $str_length; - r linsert lst before "9" "8" - assert_equal [r lindex lst 1] "8" - r LINSERT lst BEFORE "9" "7" - r write "*5\r\n\$7\r\nLINSERT\r\n\$3\r\nlst\r\n\$6\r\nBEFORE\r\n\$3\r\n\"9\"\r\n" - write_big_bulk 10; - assert_equal [read_big_bulk {r rpop lst}] $str_length - } {} {large-memory} - - test {Test LTRIM on plain nodes over 4GB} { - r flushdb - r lpush lst 9 - r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" - write_big_bulk $str_length; - r lpush lst 9 - r LTRIM lst 1 -1 - assert_equal [r llen lst] 2 - assert_equal [r rpop lst] 9 - assert_equal [read_big_bulk {r rpop lst}] $str_length - } {} {large-memory} - - test {Test LREM on plain nodes over 4GB} { - r flushdb - r lpush lst one - r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n" - write_big_bulk $str_length; - r lpush lst 9 - r LREM lst -2 "one" - assert_equal [read_big_bulk {r rpop lst}] $str_length - r llen lst - } {1} {large-memory} - - test {Test LSET on plain nodes over 4GB} { - r flushdb - r RPUSH lst "aa" - r RPUSH lst "bb" - r RPUSH lst "cc" - r write "*4\r\n\$4\r\nLSET\r\n\$3\r\nlst\r\n\$1\r\n0\r\n" - write_big_bulk $str_length; - assert_equal [r rpop lst] "cc" - assert_equal [r rpop lst] "bb" - assert_equal [read_big_bulk {r rpop lst}] $str_length - } {} {large-memory} - - test {Test LSET on plain nodes with large elements under packed_threshold over 4GB} { - r flushdb - r rpush lst a b c d e - for {set i 0} {$i < 5} {incr i} { - r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n" - write_big_bulk 1000000000 - } - r ping - } {PONG} {large-memory} - - test {Test LSET splits a quicklist node, and then merge} { - # Test when a quicklist node can't be inserted and is split, the split - # node merges with the node before it and the `before` node is kept. 
- r flushdb - r rpush lst [string repeat "x" 4096] - r lpush lst a b c d e f g - r lpush lst [string repeat "y" 4096] - # now: [y...] [g f e d c b a x...] - # (node0) (node1) - # Keep inserting elements into node1 until node1 is split into two - # nodes([g] [...]), eventually node0 will merge with the [g] node. - # Since node0 is larger, after the merge node0 will be kept and - # the [g] node will be deleted. - for {set i 7} {$i >= 3} {incr i -1} { - r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n" - write_big_bulk 1000000000 - } - assert_equal "g" [r lindex lst 1] - r ping - } {PONG} {large-memory} - - test {Test LSET splits a LZF compressed quicklist node, and then merge} { - # Test when a LZF compressed quicklist node can't be inserted and is split, - # the split node merges with the node before it and the split node is kept. - r flushdb - r config set list-compress-depth 1 - r lpush lst [string repeat "x" 2000] - r rpush lst [string repeat "y" 7000] - r rpush lst a b c d e f g - r rpush lst [string repeat "z" 8000] - r lset lst 0 h - # now: [h] [y... a b c d e f g] [z...] - # node0 node1(LZF) - # Keep inserting elements into node1 until node1 is split into two - # nodes([y...] [...]), eventually node0 will merge with the [y...] node. - # Since [y...] node is larger, after the merge node0 will be deleted and - # the [y...] node will be kept. - for {set i 7} {$i >= 3} {incr i -1} { - r write "*4\r\n\$4\r\nlset\r\n\$3\r\nlst\r\n\$1\r\n$i\r\n" - write_big_bulk 1000000000 - } - assert_equal "h" [r lindex lst 0] - r config set list-compress-depth 0 - r ping - } {PONG} {large-memory} - - test {Test LMOVE on plain nodes over 4GB} { - r flushdb - r RPUSH lst2{t} "aa" - r RPUSH lst2{t} "bb" - r write "*4\r\n\$4\r\nLSET\r\n\$7\r\nlst2{t}\r\n\$1\r\n0\r\n" - write_big_bulk $str_length; - r RPUSH lst2{t} "cc" - r RPUSH lst2{t} "dd" - r LMOVE lst2{t} lst{t} RIGHT LEFT - assert_equal [read_big_bulk {r LMOVE lst2{t} lst{t} LEFT RIGHT}] $str_length - assert_equal [r llen lst{t}] 2 - assert_equal [r llen lst2{t}] 2 - assert_equal [r lpop lst2{t}] "bb" - assert_equal [r lpop lst2{t}] "cc" - assert_equal [r lpop lst{t}] "dd" - assert_equal [read_big_bulk {r rpop lst{t}}] $str_length - } {} {large-memory} - - # restore defaults - r config set proto-max-bulk-len 536870912 - r config set client-query-buffer-limit 1073741824 - -} ;# skip 32bit builds -} -} ;# run_solo - -start_server { - tags {"list"} - overrides { - "list-max-ziplist-size" -1 - } -} { - source "tests/unit/type/list-common.tcl" - - # A helper function to execute either B*POP or BLMPOP* with one input key. - proc bpop_command {rd pop key timeout} { - if {$pop == "BLMPOP_LEFT"} { - $rd blmpop $timeout 1 $key left count 1 - } elseif {$pop == "BLMPOP_RIGHT"} { - $rd blmpop $timeout 1 $key right count 1 - } else { - $rd $pop $key $timeout - } - } - - # A helper function to execute either B*POP or BLMPOP* with two input keys. 
- proc bpop_command_two_key {rd pop key key2 timeout} { - if {$pop == "BLMPOP_LEFT"} { - $rd blmpop $timeout 2 $key $key2 left count 1 - } elseif {$pop == "BLMPOP_RIGHT"} { - $rd blmpop $timeout 2 $key $key2 right count 1 - } else { - $rd $pop $key $key2 $timeout - } - } - - proc create_listpack {key entries} { - r del $key - foreach entry $entries { r rpush $key $entry } - assert_encoding listpack $key - } - - proc create_quicklist {key entries} { - r del $key - foreach entry $entries { r rpush $key $entry } - assert_encoding quicklist $key - } - -foreach {type large} [array get largevalue] { - test "LPOS basic usage - $type" { - r DEL mylist - r RPUSH mylist a b c $large 2 3 c c - assert {[r LPOS mylist a] == 0} - assert {[r LPOS mylist c] == 2} - } - - test {LPOS RANK (positive, negative and zero rank) option} { - assert {[r LPOS mylist c RANK 1] == 2} - assert {[r LPOS mylist c RANK 2] == 6} - assert {[r LPOS mylist c RANK 4] eq ""} - assert {[r LPOS mylist c RANK -1] == 7} - assert {[r LPOS mylist c RANK -2] == 6} - assert_error "*RANK can't be zero: use 1 to start from the first match, 2 from the second ... or use negative to start*" {r LPOS mylist c RANK 0} - assert_error "*value is out of range*" {r LPOS mylist c RANK -9223372036854775808} - } - - test {LPOS COUNT option} { - assert {[r LPOS mylist c COUNT 0] == {2 6 7}} - assert {[r LPOS mylist c COUNT 1] == {2}} - assert {[r LPOS mylist c COUNT 2] == {2 6}} - assert {[r LPOS mylist c COUNT 100] == {2 6 7}} - } - - test {LPOS COUNT + RANK option} { - assert {[r LPOS mylist c COUNT 0 RANK 2] == {6 7}} - assert {[r LPOS mylist c COUNT 2 RANK -1] == {7 6}} - } - - test {LPOS non existing key} { - assert {[r LPOS mylistxxx c COUNT 0 RANK 2] eq {}} - } - - test {LPOS no match} { - assert {[r LPOS mylist x COUNT 2 RANK -1] eq {}} - assert {[r LPOS mylist x RANK -1] eq {}} - } - - test {LPOS MAXLEN} { - assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 3 RANK -1] == {7 6}} - assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}} - } - - test {LPOS when RANK is greater than matches} { - r DEL mylist - r LPUSH mylist a - assert {[r LPOS mylist b COUNT 10 RANK 5] eq {}} - } - - test "LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - $type" { - # first lpush then rpush - r del mylist1 - assert_equal 1 [r lpush mylist1 $large] - assert_encoding $type mylist1 - assert_equal 2 [r rpush mylist1 b] - assert_equal 3 [r rpush mylist1 c] - assert_equal 3 [r llen mylist1] - assert_equal $large [r lindex mylist1 0] - assert_equal b [r lindex mylist1 1] - assert_equal c [r lindex mylist1 2] - assert_equal {} [r lindex mylist1 3] - assert_equal c [r rpop mylist1] - assert_equal $large [r lpop mylist1] - - # first rpush then lpush - r del mylist2 - assert_equal 1 [r rpush mylist2 $large] - assert_equal 2 [r lpush mylist2 b] - assert_equal 3 [r lpush mylist2 c] - assert_encoding $type mylist2 - assert_equal 3 [r llen mylist2] - assert_equal c [r lindex mylist2 0] - assert_equal b [r lindex mylist2 1] - assert_equal $large [r lindex mylist2 2] - assert_equal {} [r lindex mylist2 3] - assert_equal $large [r rpop mylist2] - assert_equal c [r lpop mylist2] - } - - test "LPOP/RPOP with wrong number of arguments" { - assert_error {*wrong number of arguments for 'lpop' command} {r lpop key 1 1} - assert_error {*wrong number of arguments for 'rpop' command} {r rpop key 2 2} - } - - test "RPOP/LPOP with the optional count 
argument - $type" { - assert_equal 7 [r lpush listcount aa $large cc dd ee ff gg] - assert_equal {gg} [r lpop listcount 1] - assert_equal {ff ee} [r lpop listcount 2] - assert_equal "aa $large" [r rpop listcount 2] - assert_equal {cc} [r rpop listcount 1] - assert_equal {dd} [r rpop listcount 123] - assert_error "*ERR*range*" {r lpop forbarqaz -123} - } -} - - proc verify_resp_response {resp response resp2_response resp3_response} { - if {$resp == 2} { - assert_equal $response $resp2_response - } elseif {$resp == 3} { - assert_equal $response $resp3_response - } - } - - foreach resp {3 2} { - if {[lsearch $::denytags "resp3"] >= 0} { - if {$resp == 3} {continue} - } elseif {$::force_resp3} { - if {$resp == 2} {continue} - } - r hello $resp - - # Make sure we can distinguish between an empty array and a null response - r readraw 1 - - test "LPOP/RPOP with the count 0 returns an empty array in RESP$resp" { - r lpush listcount zero - assert_equal {*0} [r lpop listcount 0] - assert_equal {*0} [r rpop listcount 0] - } - - test "LPOP/RPOP against non existing key in RESP$resp" { - r del non_existing_key - - verify_resp_response $resp [r lpop non_existing_key] {$-1} {_} - verify_resp_response $resp [r rpop non_existing_key] {$-1} {_} - } - - test "LPOP/RPOP with against non existing key in RESP$resp" { - r del non_existing_key - - verify_resp_response $resp [r lpop non_existing_key 0] {*-1} {_} - verify_resp_response $resp [r lpop non_existing_key 1] {*-1} {_} - - verify_resp_response $resp [r rpop non_existing_key 0] {*-1} {_} - verify_resp_response $resp [r rpop non_existing_key 1] {*-1} {_} - } - - r readraw 0 - r hello 2 - } - - test {Variadic RPUSH/LPUSH} { - r del mylist - assert_equal 4 [r lpush mylist a b c d] - assert_equal 8 [r rpush mylist 0 1 2 3] - assert_equal {d c b a 0 1 2 3} [r lrange mylist 0 -1] - } - - test {DEL a list} { - assert_equal 1 [r del mylist2] - assert_equal 0 [r exists mylist2] - assert_equal 0 [r llen mylist2] - } - - foreach {type large} [array get largevalue] { - foreach {pop} {BLPOP BLMPOP_LEFT} { - test "$pop: single existing list - $type" { - set rd [redis_deferring_client] - create_$type blist "a b $large c d" - - bpop_command $rd $pop blist 1 - assert_equal {blist a} [$rd read] - if {$pop == "BLPOP"} { - bpop_command $rd BRPOP blist 1 - } else { - bpop_command $rd BLMPOP_RIGHT blist 1 - } - assert_equal {blist d} [$rd read] - - bpop_command $rd $pop blist 1 - assert_equal {blist b} [$rd read] - if {$pop == "BLPOP"} { - bpop_command $rd BRPOP blist 1 - } else { - bpop_command $rd BLMPOP_RIGHT blist 1 - } - assert_equal {blist c} [$rd read] - - assert_equal 1 [r llen blist] - $rd close - } - - test "$pop: multiple existing lists - $type" { - set rd [redis_deferring_client] - create_$type blist1{t} "a $large c" - create_$type blist2{t} "d $large f" - - bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 - assert_equal {blist1{t} a} [$rd read] - if {$pop == "BLPOP"} { - bpop_command_two_key $rd BRPOP blist1{t} blist2{t} 1 - } else { - bpop_command_two_key $rd BLMPOP_RIGHT blist1{t} blist2{t} 1 - } - assert_equal {blist1{t} c} [$rd read] - assert_equal 1 [r llen blist1{t}] - assert_equal 3 [r llen blist2{t}] - - bpop_command_two_key $rd $pop blist2{t} blist1{t} 1 - assert_equal {blist2{t} d} [$rd read] - if {$pop == "BLPOP"} { - bpop_command_two_key $rd BRPOP blist2{t} blist1{t} 1 - } else { - bpop_command_two_key $rd BLMPOP_RIGHT blist2{t} blist1{t} 1 - } - assert_equal {blist2{t} f} [$rd read] - assert_equal 1 [r llen blist1{t}] - assert_equal 1 [r llen 
blist2{t}] - $rd close - } - - test "$pop: second list has an entry - $type" { - set rd [redis_deferring_client] - r del blist1{t} - create_$type blist2{t} "d $large f" - - bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 - assert_equal {blist2{t} d} [$rd read] - if {$pop == "BLPOP"} { - bpop_command_two_key $rd BRPOP blist1{t} blist2{t} 1 - } else { - bpop_command_two_key $rd BLMPOP_RIGHT blist1{t} blist2{t} 1 - } - assert_equal {blist2{t} f} [$rd read] - assert_equal 0 [r llen blist1{t}] - assert_equal 1 [r llen blist2{t}] - $rd close - } - } - - test "BRPOPLPUSH - $type" { - r del target{t} - r rpush target{t} bar - - set rd [redis_deferring_client] - create_$type blist{t} "a b $large c d" - - $rd brpoplpush blist{t} target{t} 1 - assert_equal d [$rd read] - - assert_equal d [r lpop target{t}] - assert_equal "a b $large c" [r lrange blist{t} 0 -1] - $rd close - } - - foreach wherefrom {left right} { - foreach whereto {left right} { - test "BLMOVE $wherefrom $whereto - $type" { - r del target{t} - r rpush target{t} bar - - set rd [redis_deferring_client] - create_$type blist{t} "a b $large c d" - - $rd blmove blist{t} target{t} $wherefrom $whereto 1 - set poppedelement [$rd read] - - if {$wherefrom eq "right"} { - assert_equal d $poppedelement - assert_equal "a b $large c" [r lrange blist{t} 0 -1] - } else { - assert_equal a $poppedelement - assert_equal "b $large c d" [r lrange blist{t} 0 -1] - } - - if {$whereto eq "right"} { - assert_equal $poppedelement [r rpop target{t}] - } else { - assert_equal $poppedelement [r lpop target{t}] - } - $rd close - } - } - } - } - -foreach {pop} {BLPOP BLMPOP_LEFT} { - test "$pop, LPUSH + DEL should not awake blocked client" { - set rd [redis_deferring_client] - r del list - - bpop_command $rd $pop list 0 - wait_for_blocked_client - - r multi - r lpush list a - r del list - r exec - r del list - r lpush list b - assert_equal {list b} [$rd read] - $rd close - } - - test "$pop, LPUSH + DEL + SET should not awake blocked client" { - set rd [redis_deferring_client] - r del list - - bpop_command $rd $pop list 0 - wait_for_blocked_client - - r multi - r lpush list a - r del list - r set list foo - r exec - r del list - r lpush list b - assert_equal {list b} [$rd read] - $rd close - } -} - - test "BLPOP with same key multiple times should work (issue #801)" { - set rd [redis_deferring_client] - r del list1{t} list2{t} - - # Data arriving after the BLPOP. - $rd blpop list1{t} list2{t} list2{t} list1{t} 0 - wait_for_blocked_client - r lpush list1{t} a - assert_equal [$rd read] {list1{t} a} - $rd blpop list1{t} list2{t} list2{t} list1{t} 0 - wait_for_blocked_client - r lpush list2{t} b - assert_equal [$rd read] {list2{t} b} - - # Data already there. 
- r lpush list1{t} a - r lpush list2{t} b - $rd blpop list1{t} list2{t} list2{t} list1{t} 0 - assert_equal [$rd read] {list1{t} a} - $rd blpop list1{t} list2{t} list2{t} list1{t} 0 - assert_equal [$rd read] {list2{t} b} - $rd close - } - -foreach {pop} {BLPOP BLMPOP_LEFT} { - test "MULTI/EXEC is isolated from the point of view of $pop" { - set rd [redis_deferring_client] - r del list - - bpop_command $rd $pop list 0 - wait_for_blocked_client - - r multi - r lpush list a - r lpush list b - r lpush list c - r exec - assert_equal {list c} [$rd read] - $rd close - } - - test "$pop with variadic LPUSH" { - set rd [redis_deferring_client] - r del blist - bpop_command $rd $pop blist 0 - wait_for_blocked_client - assert_equal 2 [r lpush blist foo bar] - assert_equal {blist bar} [$rd read] - assert_equal foo [lindex [r lrange blist 0 -1] 0] - $rd close - } -} - - test "BRPOPLPUSH with zero timeout should block indefinitely" { - set rd [redis_deferring_client] - r del blist{t} target{t} - r rpush target{t} bar - $rd brpoplpush blist{t} target{t} 0 - wait_for_blocked_clients_count 1 - r rpush blist{t} foo - assert_equal foo [$rd read] - assert_equal {foo bar} [r lrange target{t} 0 -1] - $rd close - } - - foreach wherefrom {left right} { - foreach whereto {left right} { - test "BLMOVE $wherefrom $whereto with zero timeout should block indefinitely" { - set rd [redis_deferring_client] - r del blist{t} target{t} - r rpush target{t} bar - $rd blmove blist{t} target{t} $wherefrom $whereto 0 - wait_for_blocked_clients_count 1 - r rpush blist{t} foo - assert_equal foo [$rd read] - if {$whereto eq "right"} { - assert_equal {bar foo} [r lrange target{t} 0 -1] - } else { - assert_equal {foo bar} [r lrange target{t} 0 -1] - } - $rd close - } - } - } - - foreach wherefrom {left right} { - foreach whereto {left right} { - test "BLMOVE ($wherefrom, $whereto) with a client BLPOPing the target list" { - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] - r del blist{t} target{t} - $rd2 blpop target{t} 0 - wait_for_blocked_clients_count 1 - $rd blmove blist{t} target{t} $wherefrom $whereto 0 - wait_for_blocked_clients_count 2 - r rpush blist{t} foo - assert_equal foo [$rd read] - assert_equal {target{t} foo} [$rd2 read] - assert_equal 0 [r exists target{t}] - $rd close - $rd2 close - } - } - } - - test "BRPOPLPUSH with wrong source type" { - set rd [redis_deferring_client] - r del blist{t} target{t} - r set blist{t} nolist - $rd brpoplpush blist{t} target{t} 1 - assert_error "WRONGTYPE*" {$rd read} - $rd close - } - - test "BRPOPLPUSH with wrong destination type" { - set rd [redis_deferring_client] - r del blist{t} target{t} - r set target{t} nolist - r lpush blist{t} foo - $rd brpoplpush blist{t} target{t} 1 - assert_error "WRONGTYPE*" {$rd read} - $rd close - - set rd [redis_deferring_client] - r del blist{t} target{t} - r set target{t} nolist - $rd brpoplpush blist{t} target{t} 0 - wait_for_blocked_clients_count 1 - r rpush blist{t} foo - assert_error "WRONGTYPE*" {$rd read} - assert_equal {foo} [r lrange blist{t} 0 -1] - $rd close - } - - test "BRPOPLPUSH maintains order of elements after failure" { - set rd [redis_deferring_client] - r del blist{t} target{t} - r set target{t} nolist - $rd brpoplpush blist{t} target{t} 0 - wait_for_blocked_client - r rpush blist{t} a b c - assert_error "WRONGTYPE*" {$rd read} - $rd close - r lrange blist{t} 0 -1 - } {a b c} - - test "BRPOPLPUSH with multiple blocked clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - r del blist{t} 
target1{t} target2{t} - r set target1{t} nolist - $rd1 brpoplpush blist{t} target1{t} 0 - wait_for_blocked_clients_count 1 - $rd2 brpoplpush blist{t} target2{t} 0 - wait_for_blocked_clients_count 2 - r lpush blist{t} foo - - assert_error "WRONGTYPE*" {$rd1 read} - assert_equal {foo} [$rd2 read] - assert_equal {foo} [r lrange target2{t} 0 -1] - $rd1 close - $rd2 close - } - - test "BLMPOP with multiple blocked clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] - set rd4 [redis_deferring_client] - r del blist{t} blist2{t} - - $rd1 blmpop 0 2 blist{t} blist2{t} left count 1 - wait_for_blocked_clients_count 1 - $rd2 blmpop 0 2 blist{t} blist2{t} right count 10 - wait_for_blocked_clients_count 2 - $rd3 blmpop 0 2 blist{t} blist2{t} left count 10 - wait_for_blocked_clients_count 3 - $rd4 blmpop 0 2 blist{t} blist2{t} right count 1 - wait_for_blocked_clients_count 4 - - r multi - r lpush blist{t} a b c d e - r lpush blist2{t} 1 2 3 4 5 - r exec - - assert_equal {blist{t} e} [$rd1 read] - assert_equal {blist{t} {a b c d}} [$rd2 read] - assert_equal {blist2{t} {5 4 3 2 1}} [$rd3 read] - - r lpush blist2{t} 1 2 3 - assert_equal {blist2{t} 1} [$rd4 read] - $rd1 close - $rd2 close - $rd3 close - $rd4 close - } - - test "Linked LMOVEs" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - r del list1{t} list2{t} list3{t} - - $rd1 blmove list1{t} list2{t} right left 0 - wait_for_blocked_clients_count 1 - $rd2 blmove list2{t} list3{t} left right 0 - wait_for_blocked_clients_count 2 - - r rpush list1{t} foo - - assert_equal {} [r lrange list1{t} 0 -1] - assert_equal {} [r lrange list2{t} 0 -1] - assert_equal {foo} [r lrange list3{t} 0 -1] - $rd1 close - $rd2 close - } - - test "Circular BRPOPLPUSH" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - r del list1{t} list2{t} - - $rd1 brpoplpush list1{t} list2{t} 0 - wait_for_blocked_clients_count 1 - $rd2 brpoplpush list2{t} list1{t} 0 - wait_for_blocked_clients_count 2 - - r rpush list1{t} foo - - assert_equal {foo} [r lrange list1{t} 0 -1] - assert_equal {} [r lrange list2{t} 0 -1] - $rd1 close - $rd2 close - } - - test "Self-referential BRPOPLPUSH" { - set rd [redis_deferring_client] - - r del blist{t} - - $rd brpoplpush blist{t} blist{t} 0 - wait_for_blocked_client - - r rpush blist{t} foo - - assert_equal {foo} [r lrange blist{t} 0 -1] - $rd close - } - - test "BRPOPLPUSH inside a transaction" { - r del xlist{t} target{t} - r lpush xlist{t} foo - r lpush xlist{t} bar - - r multi - r brpoplpush xlist{t} target{t} 0 - r brpoplpush xlist{t} target{t} 0 - r brpoplpush xlist{t} target{t} 0 - r lrange xlist{t} 0 -1 - r lrange target{t} 0 -1 - r exec - } {foo bar {} {} {bar foo}} - - test "PUSH resulting from BRPOPLPUSH affect WATCH" { - set blocked_client [redis_deferring_client] - set watching_client [redis_deferring_client] - r del srclist{t} dstlist{t} somekey{t} - r set somekey{t} somevalue - $blocked_client brpoplpush srclist{t} dstlist{t} 0 - wait_for_blocked_client - $watching_client watch dstlist{t} - $watching_client read - $watching_client multi - $watching_client read - $watching_client get somekey{t} - $watching_client read - r lpush srclist{t} element - $watching_client exec - set res [$watching_client read] - $blocked_client close - $watching_client close - set _ $res - } {} - - test "BRPOPLPUSH does not affect WATCH while still blocked" { - set blocked_client [redis_deferring_client] - set watching_client [redis_deferring_client] - r 
del srclist{t} dstlist{t} somekey{t} - r set somekey{t} somevalue - $blocked_client brpoplpush srclist{t} dstlist{t} 0 - wait_for_blocked_client - $watching_client watch dstlist{t} - $watching_client read - $watching_client multi - $watching_client read - $watching_client get somekey{t} - $watching_client read - $watching_client exec - # Blocked BLPOPLPUSH may create problems, unblock it. - r lpush srclist{t} element - set res [$watching_client read] - $blocked_client close - $watching_client close - set _ $res - } {somevalue} - - test {BRPOPLPUSH timeout} { - set rd [redis_deferring_client] - - $rd brpoplpush foo_list{t} bar_list{t} 1 - wait_for_blocked_clients_count 1 - wait_for_blocked_clients_count 0 500 10 - set res [$rd read] - $rd close - set _ $res - } {} - - test {SWAPDB awakes blocked client} { - r flushall - r select 1 - r rpush k hello - r select 9 - set rd [redis_deferring_client] - $rd brpop k 5 - wait_for_blocked_clients_count 1 - r swapdb 1 9 - $rd read - } {k hello} {singledb:skip} - - test {SWAPDB wants to wake blocked client, but the key already expired} { - set repl [attach_to_replication_stream] - r flushall - r debug set-active-expire 0 - r select 1 - r rpush k hello - r pexpire k 100 - set rd [redis_deferring_client] - $rd deferred 0 - $rd select 9 - set id [$rd client id] - $rd deferred 1 - $rd brpop k 1 - wait_for_blocked_clients_count 1 - after 101 - r swapdb 1 9 - # The SWAPDB command tries to awake the blocked client, but it remains - # blocked because the key is expired. Check that the deferred client is - # still blocked. Then unblock it. - assert_match "*flags=b*" [r client list id $id] - r client unblock $id - assert_equal {} [$rd read] - $rd deferred 0 - # We want to force key deletion to be propagated to the replica - # in order to verify it was expired on the replication stream. - $rd set somekey1 someval1 - $rd exists k - r set somekey2 someval2 - - assert_replication_stream $repl { - {select *} - {flushall} - {select 1} - {rpush k hello} - {pexpireat k *} - {swapdb 1 9} - {select 9} - {set somekey1 someval1} - {del k} - {select 1} - {set somekey2 someval2} - } - close_replication_stream $repl - r debug set-active-expire 1 - # Restore server and client state - r select 9 - } {OK} {singledb:skip needs:debug} - - test {MULTI + LPUSH + EXPIRE + DEBUG SLEEP on blocked client, key already expired} { - set repl [attach_to_replication_stream] - r flushall - r debug set-active-expire 0 - - set rd [redis_deferring_client] - $rd client id - set id [$rd read] - $rd brpop k 0 - wait_for_blocked_clients_count 1 - - r multi - r rpush k hello - r pexpire k 100 - r debug sleep 0.2 - r exec - - # The EXEC command tries to awake the blocked client, but it remains - # blocked because the key is expired. Check that the deferred client is - # still blocked. Then unblock it. - assert_match "*flags=b*" [r client list id $id] - r client unblock $id - assert_equal {} [$rd read] - # We want to force key deletion to be propagated to the replica - # in order to verify it was expired on the replication stream. 
- $rd exists k - assert_equal {0} [$rd read] - assert_replication_stream $repl { - {select *} - {flushall} - {multi} - {rpush k hello} - {pexpireat k *} - {exec} - {del k} - } - close_replication_stream $repl - # Restore server and client state - r debug set-active-expire 1 - r select 9 - } {OK} {singledb:skip needs:debug} - - test {BLPOP unblock but the key is expired and then block again - reprocessing command} { - r flushall - r debug set-active-expire 0 - set rd [redis_deferring_client] - - set start [clock milliseconds] - $rd blpop mylist 1 - wait_for_blocked_clients_count 1 - - # The exec will try to awake the blocked client, but the key is expired, - # so the client will be blocked again during the command reprocessing. - r multi - r rpush mylist a - r pexpire mylist 100 - r debug sleep 0.2 - r exec - - assert_equal {} [$rd read] - set end [clock milliseconds] - - # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms), - # now it should be 1000, but in order to avoid timing issues, we increase the range a bit. - assert_range [expr $end-$start] 1000 1150 - - r debug set-active-expire 1 - $rd close - } {0} {needs:debug} - -foreach {pop} {BLPOP BLMPOP_LEFT} { - test "$pop when new key is moved into place" { - set rd [redis_deferring_client] - r del foo{t} - - bpop_command $rd $pop foo{t} 0 - wait_for_blocked_client - r lpush bob{t} abc def hij - r rename bob{t} foo{t} - set res [$rd read] - $rd close - set _ $res - } {foo{t} hij} - - test "$pop when result key is created by SORT..STORE" { - set rd [redis_deferring_client] - - # zero out list from previous test without explicit delete - r lpop foo{t} - r lpop foo{t} - r lpop foo{t} - - bpop_command $rd $pop foo{t} 5 - wait_for_blocked_client - r lpush notfoo{t} hello hola aguacate konichiwa zanzibar - r sort notfoo{t} ALPHA store foo{t} - set res [$rd read] - $rd close - set _ $res - } {foo{t} aguacate} -} - - test "BLPOP: timeout value out of range" { - # Timeout is parsed as float and multiplied by 1000, added mstime() - # and stored in long-long which might lead to out-of-range value. - # (Even though given timeout is smaller than LLONG_MAX, the result - # will be bigger) - assert_error "ERR *is out of range*" {r BLPOP blist1 0x7FFFFFFFFFFFFF} - } - - foreach {pop} {BLPOP BRPOP BLMPOP_LEFT BLMPOP_RIGHT} { - test "$pop: with single empty list argument" { - set rd [redis_deferring_client] - r del blist1 - bpop_command $rd $pop blist1 1 - wait_for_blocked_client - r rpush blist1 foo - assert_equal {blist1 foo} [$rd read] - assert_equal 0 [r exists blist1] - $rd close - } - - test "$pop: with negative timeout" { - set rd [redis_deferring_client] - bpop_command $rd $pop blist1 -1 - assert_error "ERR *is negative*" {$rd read} - $rd close - } - - test "$pop: with non-integer timeout" { - set rd [redis_deferring_client] - r del blist1 - bpop_command $rd $pop blist1 0.1 - r rpush blist1 foo - assert_equal {blist1 foo} [$rd read] - assert_equal 0 [r exists blist1] - $rd close - } - - test "$pop: with zero timeout should block indefinitely" { - # To test this, use a timeout of 0 and wait a second. - # The blocking pop should still be waiting for a push. - set rd [redis_deferring_client] - bpop_command $rd $pop blist1 0 - wait_for_blocked_client - r rpush blist1 foo - assert_equal {blist1 foo} [$rd read] - $rd close - } - - test "$pop: with 0.001 timeout should not block indefinitely" { - # Use a timeout of 0.001 and wait for the number of blocked clients to equal 0. 
- # Validate the empty read from the deferring client. - set rd [redis_deferring_client] - bpop_command $rd $pop blist1 0.001 - wait_for_blocked_clients_count 0 - assert_equal {} [$rd read] - $rd close - } - - test "$pop: second argument is not a list" { - set rd [redis_deferring_client] - r del blist1{t} blist2{t} - r set blist2{t} nolist{t} - bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 - assert_error "WRONGTYPE*" {$rd read} - $rd close - } - - test "$pop: timeout" { - set rd [redis_deferring_client] - r del blist1{t} blist2{t} - bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 - wait_for_blocked_client - assert_equal {} [$rd read] - $rd close - } - - test "$pop: arguments are empty" { - set rd [redis_deferring_client] - r del blist1{t} blist2{t} - - bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 - wait_for_blocked_client - r rpush blist1{t} foo - assert_equal {blist1{t} foo} [$rd read] - assert_equal 0 [r exists blist1{t}] - assert_equal 0 [r exists blist2{t}] - - bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 - wait_for_blocked_client - r rpush blist2{t} foo - assert_equal {blist2{t} foo} [$rd read] - assert_equal 0 [r exists blist1{t}] - assert_equal 0 [r exists blist2{t}] - $rd close - } - } - -foreach {pop} {BLPOP BLMPOP_LEFT} { - test "$pop inside a transaction" { - r del xlist - r lpush xlist foo - r lpush xlist bar - r multi - - bpop_command r $pop xlist 0 - bpop_command r $pop xlist 0 - bpop_command r $pop xlist 0 - r exec - } {{xlist bar} {xlist foo} {}} -} - - test {BLMPOP propagate as pop with count command to replica} { - set rd [redis_deferring_client] - set repl [attach_to_replication_stream] - - # BLMPOP without being blocked. - r lpush mylist{t} a b c - r rpush mylist2{t} 1 2 3 - r blmpop 0 1 mylist{t} left count 1 - r blmpop 0 2 mylist{t} mylist2{t} right count 10 - r blmpop 0 2 mylist{t} mylist2{t} right count 10 - - # BLMPOP that gets blocked. - $rd blmpop 0 1 mylist{t} left count 1 - wait_for_blocked_client - r lpush mylist{t} a - $rd blmpop 0 2 mylist{t} mylist2{t} left count 5 - wait_for_blocked_client - r lpush mylist{t} a b c - $rd blmpop 0 2 mylist{t} mylist2{t} right count 10 - wait_for_blocked_client - r rpush mylist2{t} a b c - - # Released on timeout. - assert_equal {} [r blmpop 0.01 1 mylist{t} left count 10] - r set foo{t} bar ;# something else to propagate after, so we can make sure the above pop didn't. 
- - $rd close - - assert_replication_stream $repl { - {select *} - {lpush mylist{t} a b c} - {rpush mylist2{t} 1 2 3} - {lpop mylist{t} 1} - {rpop mylist{t} 2} - {rpop mylist2{t} 3} - {lpush mylist{t} a} - {lpop mylist{t} 1} - {lpush mylist{t} a b c} - {lpop mylist{t} 3} - {rpush mylist2{t} a b c} - {rpop mylist2{t} 3} - {set foo{t} bar} - } - close_replication_stream $repl - } {} {needs:repl} - - test {LPUSHX, RPUSHX - generic} { - r del xlist - assert_equal 0 [r lpushx xlist a] - assert_equal 0 [r llen xlist] - assert_equal 0 [r rpushx xlist a] - assert_equal 0 [r llen xlist] - } - - foreach {type large} [array get largevalue] { - test "LPUSHX, RPUSHX - $type" { - create_$type xlist "$large c" - assert_equal 3 [r rpushx xlist d] - assert_equal 4 [r lpushx xlist a] - assert_equal 6 [r rpushx xlist 42 x] - assert_equal 9 [r lpushx xlist y3 y2 y1] - assert_equal "y1 y2 y3 a $large c d 42 x" [r lrange xlist 0 -1] - } - - test "LINSERT - $type" { - create_$type xlist "a $large c d" - assert_equal 5 [r linsert xlist before c zz] "before c" - assert_equal "a $large zz c d" [r lrange xlist 0 10] "lrangeA" - assert_equal 6 [r linsert xlist after c yy] "after c" - assert_equal "a $large zz c yy d" [r lrange xlist 0 10] "lrangeB" - assert_equal 7 [r linsert xlist after d dd] "after d" - assert_equal -1 [r linsert xlist after bad ddd] "after bad" - assert_equal "a $large zz c yy d dd" [r lrange xlist 0 10] "lrangeC" - assert_equal 8 [r linsert xlist before a aa] "before a" - assert_equal -1 [r linsert xlist before bad aaa] "before bad" - assert_equal "aa a $large zz c yy d dd" [r lrange xlist 0 10] "lrangeD" - - # check inserting integer encoded value - assert_equal 9 [r linsert xlist before aa 42] "before aa" - assert_equal 42 [r lrange xlist 0 0] "lrangeE" - } - } - - test {LINSERT raise error on bad syntax} { - catch {[r linsert xlist aft3r aa 42]} e - set e - } {*ERR*syntax*error*} - - test {LINSERT against non-list value error} { - r set k1 v1 - assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r linsert k1 after 0 0} - } - - test {LINSERT against non existing key} { - assert_equal 0 [r linsert not-a-key before 0 0] - } - -foreach type {listpack quicklist} { - foreach {num} {250 500} { - if {$type == "quicklist"} { - set origin_config [config_get_set list-max-listpack-size 5] - } else { - set origin_config [config_get_set list-max-listpack-size -1] - } - - proc check_numbered_list_consistency {key} { - set len [r llen $key] - for {set i 0} {$i < $len} {incr i} { - assert_equal $i [r lindex $key $i] - assert_equal [expr $len-1-$i] [r lindex $key [expr (-$i)-1]] - } - } - - proc check_random_access_consistency {key} { - set len [r llen $key] - for {set i 0} {$i < $len} {incr i} { - set rint [expr int(rand()*$len)] - assert_equal $rint [r lindex $key $rint] - assert_equal [expr $len-1-$rint] [r lindex $key [expr (-$rint)-1]] - } - } - - test "LINDEX consistency test - $type" { - r del mylist - for {set i 0} {$i < $num} {incr i} { - r rpush mylist $i - } - assert_encoding $type mylist - check_numbered_list_consistency mylist - } - - test "LINDEX random access - $type" { - assert_encoding $type mylist - check_random_access_consistency mylist - } - - test "Check if list is still ok after a DEBUG RELOAD - $type" { - r debug reload - assert_encoding $type mylist - check_numbered_list_consistency mylist - check_random_access_consistency mylist - } {} {needs:debug} - - config_set list-max-listpack-size $origin_config - } -} - - test {LLEN against non-list value error} { - 
r del mylist - r set mylist foobar - assert_error WRONGTYPE* {r llen mylist} - } - - test {LLEN against non existing key} { - assert_equal 0 [r llen not-a-key] - } - - test {LINDEX against non-list value error} { - assert_error WRONGTYPE* {r lindex mylist 0} - } - - test {LINDEX against non existing key} { - assert_equal "" [r lindex not-a-key 10] - } - - test {LPUSH against non-list value error} { - assert_error WRONGTYPE* {r lpush mylist 0} - } - - test {RPUSH against non-list value error} { - assert_error WRONGTYPE* {r rpush mylist 0} - } - - foreach {type large} [array get largevalue] { - test "RPOPLPUSH base case - $type" { - r del mylist1{t} mylist2{t} - create_$type mylist1{t} "a $large c d" - assert_equal d [r rpoplpush mylist1{t} mylist2{t}] - assert_equal c [r rpoplpush mylist1{t} mylist2{t}] - assert_equal $large [r rpoplpush mylist1{t} mylist2{t}] - assert_equal "a" [r lrange mylist1{t} 0 -1] - assert_equal "$large c d" [r lrange mylist2{t} 0 -1] - assert_encoding listpack mylist1{t} ;# converted to listpack after shrinking - assert_encoding $type mylist2{t} - } - - foreach wherefrom {left right} { - foreach whereto {left right} { - test "LMOVE $wherefrom $whereto base case - $type" { - r del mylist1{t} mylist2{t} - - if {$wherefrom eq "right"} { - create_$type mylist1{t} "c d $large a" - } else { - create_$type mylist1{t} "a $large c d" - } - assert_equal a [r lmove mylist1{t} mylist2{t} $wherefrom $whereto] - assert_equal $large [r lmove mylist1{t} mylist2{t} $wherefrom $whereto] - assert_equal "c d" [r lrange mylist1{t} 0 -1] - if {$whereto eq "right"} { - assert_equal "a $large" [r lrange mylist2{t} 0 -1] - } else { - assert_equal "$large a" [r lrange mylist2{t} 0 -1] - } - assert_encoding $type mylist2{t} - } - } - } - - test "RPOPLPUSH with the same list as src and dst - $type" { - create_$type mylist{t} "a $large c" - assert_equal "a $large c" [r lrange mylist{t} 0 -1] - assert_equal c [r rpoplpush mylist{t} mylist{t}] - assert_equal "c a $large" [r lrange mylist{t} 0 -1] - } - - foreach wherefrom {left right} { - foreach whereto {left right} { - test "LMOVE $wherefrom $whereto with the same list as src and dst - $type" { - if {$wherefrom eq "right"} { - create_$type mylist{t} "a $large c" - assert_equal "a $large c" [r lrange mylist{t} 0 -1] - } else { - create_$type mylist{t} "c a $large" - assert_equal "c a $large" [r lrange mylist{t} 0 -1] - } - assert_equal c [r lmove mylist{t} mylist{t} $wherefrom $whereto] - if {$whereto eq "right"} { - assert_equal "a $large c" [r lrange mylist{t} 0 -1] - } else { - assert_equal "c a $large" [r lrange mylist{t} 0 -1] - } - } - } - } - - foreach {othertype otherlarge} [array get largevalue] { - test "RPOPLPUSH with $type source and existing target $othertype" { - create_$type srclist{t} "a b c $large" - create_$othertype dstlist{t} "$otherlarge" - assert_equal $large [r rpoplpush srclist{t} dstlist{t}] - assert_equal c [r rpoplpush srclist{t} dstlist{t}] - assert_equal "a b" [r lrange srclist{t} 0 -1] - assert_equal "c $large $otherlarge" [r lrange dstlist{t} 0 -1] - - # When we rpoplpush'ed a large value, dstlist should be - # converted to the same encoding as srclist. 
- if {$type eq "quicklist"} { - assert_encoding quicklist dstlist{t} - } - } - - foreach wherefrom {left right} { - foreach whereto {left right} { - test "LMOVE $wherefrom $whereto with $type source and existing target $othertype" { - create_$othertype dstlist{t} "$otherlarge" - - if {$wherefrom eq "right"} { - create_$type srclist{t} "a b c $large" - } else { - create_$type srclist{t} "$large c a b" - } - assert_equal $large [r lmove srclist{t} dstlist{t} $wherefrom $whereto] - assert_equal c [r lmove srclist{t} dstlist{t} $wherefrom $whereto] - assert_equal "a b" [r lrange srclist{t} 0 -1] - - if {$whereto eq "right"} { - assert_equal "$otherlarge $large c" [r lrange dstlist{t} 0 -1] - } else { - assert_equal "c $large $otherlarge" [r lrange dstlist{t} 0 -1] - } - - # When we lmoved a large value, dstlist should be - # converted to the same encoding as srclist. - if {$type eq "quicklist"} { - assert_encoding quicklist dstlist{t} - } - } - } - } - } - } - - test {RPOPLPUSH against non existing key} { - r del srclist{t} dstlist{t} - assert_equal {} [r rpoplpush srclist{t} dstlist{t}] - assert_equal 0 [r exists srclist{t}] - assert_equal 0 [r exists dstlist{t}] - } - - test {RPOPLPUSH against non list src key} { - r del srclist{t} dstlist{t} - r set srclist{t} x - assert_error WRONGTYPE* {r rpoplpush srclist{t} dstlist{t}} - assert_type string srclist{t} - assert_equal 0 [r exists newlist{t}] - } - -foreach {type large} [array get largevalue] { - test "RPOPLPUSH against non list dst key - $type" { - create_$type srclist{t} "a $large c d" - r set dstlist{t} x - assert_error WRONGTYPE* {r rpoplpush srclist{t} dstlist{t}} - assert_type string dstlist{t} - assert_equal "a $large c d" [r lrange srclist{t} 0 -1] - } -} - - test {RPOPLPUSH against non existing src key} { - r del srclist{t} dstlist{t} - assert_equal {} [r rpoplpush srclist{t} dstlist{t}] - } {} - - foreach {type large} [array get largevalue] { - test "Basic LPOP/RPOP/LMPOP - $type" { - create_$type mylist "$large 1 2" - assert_equal $large [r lpop mylist] - assert_equal 2 [r rpop mylist] - assert_equal 1 [r lpop mylist] - assert_equal 0 [r llen mylist] - - create_$type mylist "$large 1 2" - assert_equal "mylist $large" [r lmpop 1 mylist left count 1] - assert_equal {mylist {2 1}} [r lmpop 2 mylist mylist right count 2] - } - } - - test {LPOP/RPOP/LMPOP against empty list} { - r del non-existing-list{t} non-existing-list2{t} - - assert_equal {} [r lpop non-existing-list{t}] - assert_equal {} [r rpop non-existing-list2{t}] - - assert_equal {} [r lmpop 1 non-existing-list{t} left count 1] - assert_equal {} [r lmpop 1 non-existing-list{t} left count 10] - assert_equal {} [r lmpop 2 non-existing-list{t} non-existing-list2{t} right count 1] - assert_equal {} [r lmpop 2 non-existing-list{t} non-existing-list2{t} right count 10] - } - - test {LPOP/RPOP/LMPOP NON-BLOCK or BLOCK against non list value} { - r set notalist{t} foo - assert_error WRONGTYPE* {r lpop notalist{t}} - assert_error WRONGTYPE* {r blpop notalist{t} 0} - assert_error WRONGTYPE* {r rpop notalist{t}} - assert_error WRONGTYPE* {r brpop notalist{t} 0} - - r del notalist2{t} - assert_error "WRONGTYPE*" {r lmpop 2 notalist{t} notalist2{t} left count 1} - assert_error "WRONGTYPE*" {r blmpop 0 2 notalist{t} notalist2{t} left count 1} - - r del notalist{t} - r set notalist2{t} nolist - assert_error "WRONGTYPE*" {r lmpop 2 notalist{t} notalist2{t} right count 10} - assert_error "WRONGTYPE*" {r blmpop 0 2 notalist{t} notalist2{t} left count 1} - } - - foreach {num} {250 500} { - 
test "Mass RPOP/LPOP - $type" { - r del mylist - set sum1 0 - for {set i 0} {$i < $num} {incr i} { - if {$i == [expr $num/2]} { - r lpush mylist $large - } - r lpush mylist $i - incr sum1 $i - } - assert_encoding $type mylist - set sum2 0 - for {set i 0} {$i < [expr $num/2]} {incr i} { - incr sum2 [r lpop mylist] - incr sum2 [r rpop mylist] - } - assert_equal $sum1 $sum2 - } - } - - test {LMPOP with illegal argument} { - assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop} - assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop 1} - assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop 1 mylist{t}} - - assert_error "ERR numkeys*" {r lmpop 0 mylist{t} LEFT} - assert_error "ERR numkeys*" {r lmpop a mylist{t} LEFT} - assert_error "ERR numkeys*" {r lmpop -1 mylist{t} RIGHT} - - assert_error "ERR syntax error*" {r lmpop 1 mylist{t} bad_where} - assert_error "ERR syntax error*" {r lmpop 1 mylist{t} LEFT bar_arg} - assert_error "ERR syntax error*" {r lmpop 1 mylist{t} RIGHT LEFT} - assert_error "ERR syntax error*" {r lmpop 1 mylist{t} COUNT} - assert_error "ERR syntax error*" {r lmpop 1 mylist{t} LEFT COUNT 1 COUNT 2} - assert_error "ERR syntax error*" {r lmpop 2 mylist{t} mylist2{t} bad_arg} - - assert_error "ERR count*" {r lmpop 1 mylist{t} LEFT COUNT 0} - assert_error "ERR count*" {r lmpop 1 mylist{t} RIGHT COUNT a} - assert_error "ERR count*" {r lmpop 1 mylist{t} LEFT COUNT -1} - assert_error "ERR count*" {r lmpop 2 mylist{t} mylist2{t} RIGHT COUNT -1} - } - -foreach {type large} [array get largevalue] { - test "LMPOP single existing list - $type" { - # Same key multiple times. - create_$type mylist{t} "a b $large d e f" - assert_equal {mylist{t} {a b}} [r lmpop 2 mylist{t} mylist{t} left count 2] - assert_equal {mylist{t} {f e}} [r lmpop 2 mylist{t} mylist{t} right count 2] - assert_equal 2 [r llen mylist{t}] - - # First one exists, second one does not exist. - create_$type mylist{t} "a b $large d e" - r del mylist2{t} - assert_equal {mylist{t} a} [r lmpop 2 mylist{t} mylist2{t} left count 1] - assert_equal 4 [r llen mylist{t}] - assert_equal "mylist{t} {e d $large b}" [r lmpop 2 mylist{t} mylist2{t} right count 10] - assert_equal {} [r lmpop 2 mylist{t} mylist2{t} right count 1] - - # First one does not exist, second one exists. - r del mylist{t} - create_$type mylist2{t} "1 2 $large 4 5" - assert_equal {mylist2{t} 5} [r lmpop 2 mylist{t} mylist2{t} right count 1] - assert_equal 4 [r llen mylist2{t}] - assert_equal "mylist2{t} {1 2 $large 4}" [r lmpop 2 mylist{t} mylist2{t} left count 10] - - assert_equal 0 [r exists mylist{t} mylist2{t}] - } - - test "LMPOP multiple existing lists - $type" { - create_$type mylist{t} "a b $large d e" - create_$type mylist2{t} "1 2 $large 4 5" - - # Pop up from the first key. - assert_equal {mylist{t} {a b}} [r lmpop 2 mylist{t} mylist2{t} left count 2] - assert_equal 3 [r llen mylist{t}] - assert_equal "mylist{t} {e d $large}" [r lmpop 2 mylist{t} mylist2{t} right count 3] - assert_equal 0 [r exists mylist{t}] - - # Pop up from the second key. - assert_equal "mylist2{t} {1 2 $large}" [r lmpop 2 mylist{t} mylist2{t} left count 3] - assert_equal 2 [r llen mylist2{t}] - assert_equal {mylist2{t} {5 4}} [r lmpop 2 mylist{t} mylist2{t} right count 2] - assert_equal 0 [r exists mylist{t}] - - # Pop up all elements. 
- create_$type mylist{t} "a $large c" - create_$type mylist2{t} "1 $large 3" - assert_equal "mylist{t} {a $large c}" [r lmpop 2 mylist{t} mylist2{t} left count 10] - assert_equal 0 [r llen mylist{t}] - assert_equal "mylist2{t} {3 $large 1}" [r lmpop 2 mylist{t} mylist2{t} right count 10] - assert_equal 0 [r llen mylist2{t}] - assert_equal 0 [r exists mylist{t} mylist2{t}] - } -} - - test {LMPOP propagate as pop with count command to replica} { - set repl [attach_to_replication_stream] - - # left/right propagate as lpop/rpop with count - r lpush mylist{t} a b c - - # Pop elements from one list. - r lmpop 1 mylist{t} left count 1 - r lmpop 1 mylist{t} right count 1 - - # Now the list have only one element - r lmpop 2 mylist{t} mylist2{t} left count 10 - - # No elements so we don't propagate. - r lmpop 2 mylist{t} mylist2{t} left count 10 - - # Pop elements from the second list. - r rpush mylist2{t} 1 2 3 - r lmpop 2 mylist{t} mylist2{t} left count 2 - r lmpop 2 mylist{t} mylist2{t} right count 1 - - # Pop all elements. - r rpush mylist{t} a b c - r rpush mylist2{t} 1 2 3 - r lmpop 2 mylist{t} mylist2{t} left count 10 - r lmpop 2 mylist{t} mylist2{t} right count 10 - - assert_replication_stream $repl { - {select *} - {lpush mylist{t} a b c} - {lpop mylist{t} 1} - {rpop mylist{t} 1} - {lpop mylist{t} 1} - {rpush mylist2{t} 1 2 3} - {lpop mylist2{t} 2} - {rpop mylist2{t} 1} - {rpush mylist{t} a b c} - {rpush mylist2{t} 1 2 3} - {lpop mylist{t} 3} - {rpop mylist2{t} 3} - } - close_replication_stream $repl - } {} {needs:repl} - - foreach {type large} [array get largevalue] { - test "LRANGE basics - $type" { - create_$type mylist "$large 1 2 3 4 5 6 7 8 9" - assert_equal {1 2 3 4 5 6 7 8} [r lrange mylist 1 -2] - assert_equal {7 8 9} [r lrange mylist -3 -1] - assert_equal {4} [r lrange mylist 4 4] - } - - test "LRANGE inverted indexes - $type" { - create_$type mylist "$large 1 2 3 4 5 6 7 8 9" - assert_equal {} [r lrange mylist 6 2] - } - - test "LRANGE out of range indexes including the full list - $type" { - create_$type mylist "$large 1 2 3" - assert_equal "$large 1 2 3" [r lrange mylist -1000 1000] - } - - test "LRANGE out of range negative end index - $type" { - create_$type mylist "$large 1 2 3" - assert_equal $large [r lrange mylist 0 -4] - assert_equal {} [r lrange mylist 0 -5] - } - } - - test {LRANGE against non existing key} { - assert_equal {} [r lrange nosuchkey 0 1] - } - - test {LRANGE with start > end yields an empty array for backward compatibility} { - create_$type mylist "1 $large 3" - assert_equal {} [r lrange mylist 1 0] - assert_equal {} [r lrange mylist -1 -2] - } - - foreach {type large} [array get largevalue] { - proc trim_list {type min max} { - upvar 1 large large - r del mylist - create_$type mylist "1 2 3 4 $large" - r ltrim mylist $min $max - r lrange mylist 0 -1 - } - - test "LTRIM basics - $type" { - assert_equal "1" [trim_list $type 0 0] - assert_equal "1 2" [trim_list $type 0 1] - assert_equal "1 2 3" [trim_list $type 0 2] - assert_equal "2 3" [trim_list $type 1 2] - assert_equal "2 3 4 $large" [trim_list $type 1 -1] - assert_equal "2 3 4" [trim_list $type 1 -2] - assert_equal "4 $large" [trim_list $type -2 -1] - assert_equal "$large" [trim_list $type -1 -1] - assert_equal "1 2 3 4 $large" [trim_list $type -5 -1] - assert_equal "1 2 3 4 $large" [trim_list $type -10 10] - assert_equal "1 2 3 4 $large" [trim_list $type 0 5] - assert_equal "1 2 3 4 $large" [trim_list $type 0 10] - } - - test "LTRIM out of range negative end index - $type" { - assert_equal {1} 
[trim_list $type 0 -5] - assert_equal {} [trim_list $type 0 -6] - } - - test "LSET - $type" { - create_$type mylist "99 98 $large 96 95" - r lset mylist 1 foo - r lset mylist -1 bar - assert_equal "99 foo $large 96 bar" [r lrange mylist 0 -1] - } - - test "LSET out of range index - $type" { - assert_error ERR*range* {r lset mylist 10 foo} - } - } - - test {LSET against non existing key} { - assert_error ERR*key* {r lset nosuchkey 10 foo} - } - - test {LSET against non list value} { - r set nolist foobar - assert_error WRONGTYPE* {r lset nolist 0 foo} - } - - foreach {type e} [array get largevalue] { - test "LREM remove all the occurrences - $type" { - create_$type mylist "$e foo bar foobar foobared zap bar test foo" - assert_equal 2 [r lrem mylist 0 bar] - assert_equal "$e foo foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM remove the first occurrence - $type" { - assert_equal 1 [r lrem mylist 1 foo] - assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM remove non existing element - $type" { - assert_equal 0 [r lrem mylist 1 nosuchelement] - assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM starting from tail with negative count - $type" { - create_$type mylist "$e foo bar foobar foobared zap bar test foo foo" - assert_equal 1 [r lrem mylist -1 bar] - assert_equal "$e foo bar foobar foobared zap test foo foo" [r lrange mylist 0 -1] - } - - test "LREM starting from tail with negative count (2) - $type" { - assert_equal 2 [r lrem mylist -2 foo] - assert_equal "$e foo bar foobar foobared zap test" [r lrange mylist 0 -1] - } - - test "LREM deleting objects that may be int encoded - $type" { - create_$type myotherlist "$e 1 2 3" - assert_equal 1 [r lrem myotherlist 1 2] - assert_equal 3 [r llen myotherlist] - } - } - - test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - $rd1 brpoplpush a{t} b{t} 0 - $rd1 brpoplpush a{t} b{t} 0 - wait_for_blocked_clients_count 1 - $rd2 brpoplpush b{t} c{t} 0 - wait_for_blocked_clients_count 2 - r lpush a{t} data - $rd1 close - $rd2 close - r ping - } {PONG} - - test "BLPOP/BLMOVE should increase dirty" { - r del lst{t} lst1{t} - set rd [redis_deferring_client] - - set dirty [s rdb_changes_since_last_save] - $rd blpop lst{t} 0 - wait_for_blocked_client - r lpush lst{t} a - assert_equal {lst{t} a} [$rd read] - set dirty2 [s rdb_changes_since_last_save] - assert {$dirty2 == $dirty + 2} - - set dirty [s rdb_changes_since_last_save] - $rd blmove lst{t} lst1{t} left left 0 - wait_for_blocked_client - r lpush lst{t} a - assert_equal {a} [$rd read] - set dirty2 [s rdb_changes_since_last_save] - assert {$dirty2 == $dirty + 2} - - $rd close - } - -foreach {pop} {BLPOP BLMPOP_RIGHT} { - test "client unblock tests" { - r del l - set rd [redis_deferring_client] - $rd client id - set id [$rd read] - - # test default args - bpop_command $rd $pop l 0 - wait_for_blocked_client - r client unblock $id - assert_equal {} [$rd read] - - # test with timeout - bpop_command $rd $pop l 0 - wait_for_blocked_client - r client unblock $id TIMEOUT - assert_equal {} [$rd read] - - # test with error - bpop_command $rd $pop l 0 - wait_for_blocked_client - r client unblock $id ERROR - catch {[$rd read]} e - assert_equal $e "UNBLOCKED client unblocked via CLIENT UNBLOCK" - - # test with invalid client id - catch {[r client unblock asd]} e - assert_equal $e "ERR value is not an integer or out of 
range" - - # test with non blocked client - set myid [r client id] - catch {[r client unblock $myid]} e - assert_equal $e {invalid command name "0"} - - # finally, see the this client and list are still functional - bpop_command $rd $pop l 0 - wait_for_blocked_client - r lpush l foo - assert_equal {l foo} [$rd read] - $rd close - } -} - - foreach {max_lp_size large} "3 $largevalue(listpack) -1 $largevalue(quicklist)" { - test "List listpack -> quicklist encoding conversion" { - set origin_conf [config_get_set list-max-listpack-size $max_lp_size] - - # RPUSH - create_listpack lst "a b c" - r RPUSH lst $large - assert_encoding quicklist lst - - # LINSERT - create_listpack lst "a b c" - r LINSERT lst after b $large - assert_encoding quicklist lst - - # LSET - create_listpack lst "a b c" - r LSET lst 0 $large - assert_encoding quicklist lst - - # LMOVE - create_quicklist lsrc{t} "a b c $large" - create_listpack ldes{t} "d e f" - r LMOVE lsrc{t} ldes{t} right right - assert_encoding quicklist ldes{t} - - r config set list-max-listpack-size $origin_conf - } - } - - test "List quicklist -> listpack encoding conversion" { - set origin_conf [config_get_set list-max-listpack-size 3] - - # RPOP - create_quicklist lst "a b c d" - r RPOP lst 3 - assert_encoding listpack lst - - # LREM - create_quicklist lst "a a a d" - r LREM lst 3 a - assert_encoding listpack lst - - # LTRIM - create_quicklist lst "a b c d" - r LTRIM lst 1 1 - assert_encoding listpack lst - - r config set list-max-listpack-size -1 - - # RPOP - create_quicklist lst "a b c $largevalue(quicklist)" - r RPOP lst 1 - assert_encoding listpack lst - - # LREM - create_quicklist lst "a $largevalue(quicklist)" - r LREM lst 1 $largevalue(quicklist) - assert_encoding listpack lst - - # LTRIM - create_quicklist lst "a b $largevalue(quicklist)" - r LTRIM lst 0 1 - assert_encoding listpack lst - - # LSET - create_quicklist lst "$largevalue(quicklist) a b" - r RPOP lst 2 - assert_encoding quicklist lst - r LSET lst -1 c - assert_encoding listpack lst - - r config set list-max-listpack-size $origin_conf - } - - test "List encoding conversion when RDB loading" { - set origin_conf [config_get_set list-max-listpack-size 3] - create_listpack lst "a b c" - - # list is still a listpack after DEBUG RELOAD - r DEBUG RELOAD - assert_encoding listpack lst - - # list is still a quicklist after DEBUG RELOAD - r RPUSH lst d - r DEBUG RELOAD - assert_encoding quicklist lst - - # when a quicklist has only one packed node, it will be - # converted to listpack during rdb loading - r RPOP lst - assert_encoding quicklist lst - r DEBUG RELOAD - assert_encoding listpack lst - - r config set list-max-listpack-size $origin_conf - } {OK} {needs:debug} - - test "List invalid list-max-listpack-size config" { - # ​When list-max-listpack-size is 0 we treat it as 1 and it'll - # still be listpack if there's a single element in the list. - r config set list-max-listpack-size 0 - r DEL lst - r RPUSH lst a - assert_encoding listpack lst - r RPUSH lst b - assert_encoding quicklist lst - - # When list-max-listpack-size < -5 we treat it as -5. 
- r config set list-max-listpack-size -6 - r DEL lst - r RPUSH lst [string repeat "x" 60000] - assert_encoding listpack lst - # Converted to quicklist when the size of listpack exceed 65536 - r RPUSH lst [string repeat "x" 5536] - assert_encoding quicklist lst - } - - test "List of various encodings" { - r del k - r lpush k 127 ;# ZIP_INT_8B - r lpush k 32767 ;# ZIP_INT_16B - r lpush k 2147483647 ;# ZIP_INT_32B - r lpush k 9223372036854775808 ;# ZIP_INT_64B - r lpush k 0 ;# ZIP_INT_IMM_MIN - r lpush k 12 ;# ZIP_INT_IMM_MAX - r lpush k [string repeat x 31] ;# ZIP_STR_06B - r lpush k [string repeat x 8191] ;# ZIP_STR_14B - r lpush k [string repeat x 65535] ;# ZIP_STR_32B - assert_encoding quicklist k ;# exceeds the size limit of quicklist node - set k [r lrange k 0 -1] - set dump [r dump k] - - # coverage for objectComputeSize - assert_morethan [memory_usage k] 0 - - config_set sanitize-dump-payload no mayfail - r restore kk 0 $dump replace - assert_encoding quicklist kk - set kk [r lrange kk 0 -1] - - # try some forward and backward searches to make sure all encodings - # can be traversed - assert_equal [r lindex kk 5] {9223372036854775808} - assert_equal [r lindex kk -5] {0} - assert_equal [r lpos kk foo rank 1] {} - assert_equal [r lpos kk foo rank -1] {} - - # make sure the values are right - assert_equal $k $kk - assert_equal [lpop k] [string repeat x 65535] - assert_equal [lpop k] [string repeat x 8191] - assert_equal [lpop k] [string repeat x 31] - set _ $k - } {12 0 9223372036854775808 2147483647 32767 127} - - test "List of various encodings - sanitize dump" { - config_set sanitize-dump-payload yes mayfail - r restore kk 0 $dump replace - assert_encoding quicklist kk - set k [r lrange k 0 -1] - set kk [r lrange kk 0 -1] - - # make sure the values are right - assert_equal $k $kk - assert_equal [lpop k] [string repeat x 65535] - assert_equal [lpop k] [string repeat x 8191] - assert_equal [lpop k] [string repeat x 31] - set _ $k - } {12 0 9223372036854775808 2147483647 32767 127} - - test "Unblock fairness is kept while pipelining" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - # delete the list in case already exists - r del mylist - - # block a client on the list - $rd1 BLPOP mylist 0 - wait_for_blocked_clients_count 1 - - # pipeline on other client a list push and a blocking pop - # we should expect the fairness to be kept and have $rd1 - # being unblocked - set buf "" - append buf "LPUSH mylist 1\r\n" - append buf "BLPOP mylist 0\r\n" - $rd2 write $buf - $rd2 flush - - # we check that we still have 1 blocked client - # and that the first blocked client has been served - assert_equal [$rd1 read] {mylist 1} - assert_equal [$rd2 read] {1} - wait_for_blocked_clients_count 1 - - # We no unblock the last client and verify it was served last - r LPUSH mylist 2 - wait_for_blocked_clients_count 0 - assert_equal [$rd2 read] {mylist 2} - - $rd1 close - $rd2 close - } - - test "Unblock fairness is kept during nested unblock" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] - - # delete the list in case already exists - r del l1{t} l2{t} l3{t} - - # block a client on the list - $rd1 BRPOPLPUSH l1{t} l3{t} 0 - wait_for_blocked_clients_count 1 - - $rd2 BLPOP l2{t} 0 - wait_for_blocked_clients_count 2 - - $rd3 BLMPOP 0 2 l2{t} l3{t} LEFT COUNT 1 - wait_for_blocked_clients_count 3 - - r multi - r lpush l1{t} 1 - r lpush l2{t} 2 - r exec - - wait_for_blocked_clients_count 0 - - assert_equal [$rd1 read] {1} - 
assert_equal [$rd2 read] {l2{t} 2} - assert_equal [$rd3 read] {l3{t} 1} - - $rd1 close - $rd2 close - $rd3 close - } - - test "Blocking command accounted only once in commandstats" { - # cleanup first - r del mylist - - # create a test client - set rd [redis_deferring_client] - - # reset the server stats - r config resetstat - - # block a client on the list - $rd BLPOP mylist 0 - wait_for_blocked_clients_count 1 - - # unblock the list - r LPUSH mylist 1 - wait_for_blocked_clients_count 0 - - assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdrstat blpop r] - - $rd close - } - - test "Blocking command accounted only once in commandstats after timeout" { - # cleanup first - r del mylist - - # create a test client - set rd [redis_deferring_client] - $rd client id - set id [$rd read] - - # reset the server stats - r config resetstat - - # block a client on the list - $rd BLPOP mylist 0 - wait_for_blocked_clients_count 1 - - # unblock the client on timeout - r client unblock $id timeout - - assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdrstat blpop r] - - $rd close - } - - test {Command being unblocked cause another command to get unblocked execution order test} { - r del src{t} dst{t} key1{t} key2{t} key3{t} - set repl [attach_to_replication_stream] - - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] - - $rd1 blmove src{t} dst{t} left right 0 - wait_for_blocked_clients_count 1 - - $rd2 blmove dst{t} src{t} right left 0 - wait_for_blocked_clients_count 2 - - # Create a pipeline of commands that will be processed in one socket read. - # Insert two set commands before and after lpush to observe the execution order. - set buf "" - append buf "set key1{t} value1\r\n" - append buf "lpush src{t} dummy\r\n" - append buf "set key2{t} value2\r\n" - $rd3 write $buf - $rd3 flush - - wait_for_blocked_clients_count 0 - - r set key3{t} value3 - - # If a command being unblocked causes another command to get unblocked, like a BLMOVE would do, - # then the new unblocked command will get processed right away rather than wait for later. - # If the set command occurs between two lmove commands, the results are not as expected. - assert_replication_stream $repl { - {select *} - {set key1{t} value1} - {lpush src{t} dummy} - {lmove src{t} dst{t} left right} - {lmove dst{t} src{t} right left} - {set key2{t} value2} - {set key3{t} value3} - } - - $rd1 close - $rd2 close - $rd3 close - - close_replication_stream $repl - } {} {needs:repl} - -} ;# stop servers \ No newline at end of file diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl deleted file mode 100644 index f5bf8e4fc..000000000 --- a/tests/unit/type/set.tcl +++ /dev/null @@ -1,1314 +0,0 @@ -start_server { - tags {"set"} - overrides { - "set-max-intset-entries" 512 - "set-max-listpack-entries" 128 - "set-max-listpack-value" 32 - } -} { - proc create_set {key entries} { - r del $key - foreach entry $entries { r sadd $key $entry } - } - - # Values for initialing sets, per encoding. 
- array set initelems {listpack {foo} hashtable {foo}} - for {set i 0} {$i < 130} {incr i} { - lappend initelems(hashtable) [format "i%03d" $i] - } - -# foreach type {listpack hashtable} { -# test "SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - $type" { -# create_set myset $initelems($type) -# #assert_encoding $type myset -# assert_equal 1 [r sadd myset bar] -# assert_equal 0 [r sadd myset bar] -# assert_equal [expr [llength $initelems($type)] + 1] [r scard myset] -# assert_equal 1 [r sismember myset foo] -# assert_equal 1 [r sismember myset bar] -# assert_equal 0 [r sismember myset bla] -# assert_equal {1} [r smismember myset foo] -# assert_equal {1 1} [r smismember myset foo bar] -# assert_equal {1 0} [r smismember myset foo bla] -# assert_equal {0 1} [r smismember myset bla foo] -# assert_equal {0} [r smismember myset bla] -# assert_equal "bar $initelems($type)" [lsort [r smembers myset]] -# } -# } - -# test {SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - intset} { -# create_set myset {17} -# #assert_encoding intset myset -# assert_equal 1 [r sadd myset 16] -# assert_equal 0 [r sadd myset 16] -# assert_equal 2 [r scard myset] -# assert_equal 1 [r sismember myset 16] -# assert_equal 1 [r sismember myset 17] -# assert_equal 0 [r sismember myset 18] -# assert_equal {1} [r smismember myset 16] -# assert_equal {1 1} [r smismember myset 16 17] -# assert_equal {1 0} [r smismember myset 16 18] -# assert_equal {0 1} [r smismember myset 18 16] -# assert_equal {0} [r smismember myset 18] -# assert_equal {16 17} [lsort [r smembers myset]] -# } - - test {SMISMEMBER SMEMBERS SCARD against non set} { - r lpush mylist foo - assert_error WRONGTYPE* {r smismember mylist bar} - assert_error WRONGTYPE* {r smembers mylist} - assert_error WRONGTYPE* {r scard mylist} - } - - test {SMISMEMBER SMEMBERS SCARD against non existing key} { - assert_equal {0} [r smismember myset1 foo] - assert_equal {0 0} [r smismember myset1 foo bar] - assert_equal {} [r smembers myset1] - assert_equal {0} [r scard myset1] - } - - test {SMISMEMBER requires one or more members} { - r del zmscoretest - r zadd zmscoretest 10 x - r zadd zmscoretest 20 y - - catch {r smismember zmscoretest} e - assert_match {*ERR*wrong*number*arg*} $e - } - - test {SADD against non set} { - r lpush mylist foo - assert_error WRONGTYPE* {r sadd mylist bar} - } - - test "SADD a non-integer against a small intset" { - create_set myset {1 2 3} - assert_encoding intset myset - assert_equal 1 [r sadd myset a] - assert_encoding listpack myset - } - - test "SADD a non-integer against a large intset" { - create_set myset {0} - for {set i 1} {$i < 130} {incr i} {r sadd myset $i} - assert_encoding intset myset - assert_equal 1 [r sadd myset a] - assert_encoding hashtable myset - } - - test "SADD an integer larger than 64 bits" { - create_set myset {213244124402402314402033402} - assert_encoding listpack myset - assert_equal 1 [r sismember myset 213244124402402314402033402] - assert_equal {1} [r smismember myset 213244124402402314402033402] - } - - test "SADD an integer larger than 64 bits to a large intset" { - create_set myset {0} - for {set i 1} {$i < 130} {incr i} {r sadd myset $i} - assert_encoding intset myset - r sadd myset 213244124402402314402033402 - assert_encoding hashtable myset - assert_equal 1 [r sismember myset 213244124402402314402033402] - assert_equal {1} [r smismember myset 213244124402402314402033402] - } - -foreach type {single multiple single_multiple} { - test "SADD overflows the maximum allowed integers in an intset - $type" { 
- r del myset - - if {$type == "single"} { - # All are single sadd commands. - for {set i 0} {$i < 512} {incr i} { r sadd myset $i } - } elseif {$type == "multiple"} { - # One sadd command to add all elements. - set args {} - for {set i 0} {$i < 512} {incr i} { lappend args $i } - r sadd myset {*}$args - } elseif {$type == "single_multiple"} { - # First one sadd adds an element (creates a key) and then one sadd adds all elements. - r sadd myset 1 - set args {} - for {set i 0} {$i < 512} {incr i} { lappend args $i } - r sadd myset {*}$args - } - - assert_encoding intset myset - assert_equal 512 [r scard myset] - assert_equal 1 [r sadd myset 512] - assert_encoding hashtable myset - } - - test "SADD overflows the maximum allowed elements in a listpack - $type" { - r del myset - - if {$type == "single"} { - # All are single sadd commands. - r sadd myset a - for {set i 0} {$i < 127} {incr i} { r sadd myset $i } - } elseif {$type == "multiple"} { - # One sadd command to add all elements. - set args {} - lappend args a - for {set i 0} {$i < 127} {incr i} { lappend args $i } - r sadd myset {*}$args - } elseif {$type == "single_multiple"} { - # First one sadd adds an element (creates a key) and then one sadd adds all elements. - r sadd myset a - set args {} - lappend args a - for {set i 0} {$i < 127} {incr i} { lappend args $i } - r sadd myset {*}$args - } - - assert_encoding listpack myset - assert_equal 128 [r scard myset] - assert_equal 1 [r sadd myset b] - assert_encoding hashtable myset - } -} - - test {Variadic SADD} { - r del myset - assert_equal 3 [r sadd myset a b c] - assert_equal 2 [r sadd myset A a b c B] - assert_equal [lsort {A a b c B}] [lsort [r smembers myset]] - } - - test "Set encoding after DEBUG RELOAD" { - r del myintset - r del myhashset - r del mylargeintset - r del mysmallset - for {set i 0} {$i < 100} {incr i} { r sadd myintset $i } - for {set i 0} {$i < 1280} {incr i} { r sadd mylargeintset $i } - for {set i 0} {$i < 50} {incr i} { r sadd mysmallset [format "i%03d" $i] } - for {set i 0} {$i < 256} {incr i} { r sadd myhashset [format "i%03d" $i] } - assert_encoding intset myintset - assert_encoding hashtable mylargeintset - assert_encoding listpack mysmallset - assert_encoding hashtable myhashset - - r debug reload - assert_encoding intset myintset - assert_encoding hashtable mylargeintset - assert_encoding listpack mysmallset - assert_encoding hashtable myhashset - } {} {needs:debug} - - foreach type {listpack hashtable} { - test {SREM basics - $type} { - create_set myset $initelems($type) - r sadd myset ciao - assert_encoding $type myset - assert_equal 0 [r srem myset qux] - assert_equal 1 [r srem myset ciao] - assert_equal $initelems($type) [lsort [r smembers myset]] - } - } - - test {SREM basics - intset} { - create_set myset {3 4 5} - assert_encoding intset myset - assert_equal 0 [r srem myset 6] - assert_equal 1 [r srem myset 4] - assert_equal {3 5} [lsort [r smembers myset]] - } - - test {SREM with multiple arguments} { - r del myset - r sadd myset a b c d - assert_equal 0 [r srem myset k k k] - assert_equal 2 [r srem myset b d x y] - lsort [r smembers myset] - } {a c} - - test {SREM variadic version with more args needed to destroy the key} { - r del myset - r sadd myset 1 2 3 - r srem myset 1 2 3 4 5 6 7 8 - } {3} - - test "SINTERCARD with illegal arguments" { - assert_error "ERR wrong number of arguments for 'sintercard' command" {r sintercard} - assert_error "ERR wrong number of arguments for 'sintercard' command" {r sintercard 1} - - assert_error "ERR numkeys*" 
{r sintercard 0 myset{t}} - assert_error "ERR numkeys*" {r sintercard a myset{t}} - - assert_error "ERR Number of keys*" {r sintercard 2 myset{t}} - assert_error "ERR Number of keys*" {r sintercard 3 myset{t} myset2{t}} - - assert_error "ERR syntax error*" {r sintercard 1 myset{t} myset2{t}} - assert_error "ERR syntax error*" {r sintercard 1 myset{t} bar_arg} - assert_error "ERR syntax error*" {r sintercard 1 myset{t} LIMIT} - - assert_error "ERR LIMIT*" {r sintercard 1 myset{t} LIMIT -1} - assert_error "ERR LIMIT*" {r sintercard 1 myset{t} LIMIT a} - } - - test "SINTERCARD against non-set should throw error" { - r del set{t} - r sadd set{t} a b c - r set key1{t} x - - assert_error "WRONGTYPE*" {r sintercard 1 key1{t}} - assert_error "WRONGTYPE*" {r sintercard 2 set{t} key1{t}} - assert_error "WRONGTYPE*" {r sintercard 2 key1{t} noset{t}} - } - - test "SINTERCARD against non-existing key" { - assert_equal 0 [r sintercard 1 non-existing-key] - assert_equal 0 [r sintercard 1 non-existing-key limit 0] - assert_equal 0 [r sintercard 1 non-existing-key limit 10] - } - - foreach {type} {regular intset} { - # Create sets setN{t} where N = 1..5 - if {$type eq "regular"} { - set smallenc listpack - set bigenc hashtable - } else { - set smallenc intset - set bigenc intset - } - # Sets 1, 2 and 4 are big; sets 3 and 5 are small. - array set encoding "1 $bigenc 2 $bigenc 3 $smallenc 4 $bigenc 5 $smallenc" - - for {set i 1} {$i <= 5} {incr i} { - r del [format "set%d{t}" $i] - } - for {set i 0} {$i < 200} {incr i} { - r sadd set1{t} $i - r sadd set2{t} [expr $i+195] - } - foreach i {199 195 1000 2000} { - r sadd set3{t} $i - } - for {set i 5} {$i < 200} {incr i} { - r sadd set4{t} $i - } - r sadd set5{t} 0 - - # To make sure the sets are encoded as the type we are testing -- also - # when the VM is enabled and the values may be swapped in and out - # while the tests are running -- an extra element is added to every - # set that determines its encoding. 
- set large 200 - if {$type eq "regular"} { - set large foo - } - - for {set i 1} {$i <= 5} {incr i} { - r sadd [format "set%d{t}" $i] $large - } - - test "Generated sets must be encoded correctly - $type" { - for {set i 1} {$i <= 5} {incr i} { - assert_encoding $encoding($i) [format "set%d{t}" $i] - } - } - - test "SINTER with two sets - $type" { - assert_equal [list 195 196 197 198 199 $large] [lsort [r sinter set1{t} set2{t}]] - } - - test "SINTERCARD with two sets - $type" { - assert_equal 6 [r sintercard 2 set1{t} set2{t}] - assert_equal 6 [r sintercard 2 set1{t} set2{t} limit 0] - assert_equal 3 [r sintercard 2 set1{t} set2{t} limit 3] - assert_equal 6 [r sintercard 2 set1{t} set2{t} limit 10] - } - - test "SINTERSTORE with two sets - $type" { - r sinterstore setres{t} set1{t} set2{t} - assert_encoding $smallenc setres{t} - assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres{t}]] - } - - test "SINTERSTORE with two sets, after a DEBUG RELOAD - $type" { - r debug reload - r sinterstore setres{t} set1{t} set2{t} - assert_encoding $smallenc setres{t} - assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres{t}]] - } {} {needs:debug} - - test "SUNION with two sets - $type" { - set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"] - assert_equal $expected [lsort [r sunion set1{t} set2{t}]] - } - - test "SUNIONSTORE with two sets - $type" { - r sunionstore setres{t} set1{t} set2{t} - assert_encoding $bigenc setres{t} - set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"] - assert_equal $expected [lsort [r smembers setres{t}]] - } - - test "SINTER against three sets - $type" { - assert_equal [list 195 199 $large] [lsort [r sinter set1{t} set2{t} set3{t}]] - } - - test "SINTERCARD against three sets - $type" { - assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t}] - assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t} limit 0] - assert_equal 2 [r sintercard 3 set1{t} set2{t} set3{t} limit 2] - assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t} limit 10] - } - - test "SINTERSTORE with three sets - $type" { - r sinterstore setres{t} set1{t} set2{t} set3{t} - assert_equal [list 195 199 $large] [lsort [r smembers setres{t}]] - } - - test "SUNION with non existing keys - $type" { - set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"] - assert_equal $expected [lsort [r sunion nokey1{t} set1{t} set2{t} nokey2{t}]] - } - - test "SDIFF with two sets - $type" { - assert_equal {0 1 2 3 4} [lsort [r sdiff set1{t} set4{t}]] - } - - test "SDIFF with three sets - $type" { - assert_equal {1 2 3 4} [lsort [r sdiff set1{t} set4{t} set5{t}]] - } - - test "SDIFFSTORE with three sets - $type" { - r sdiffstore setres{t} set1{t} set4{t} set5{t} - # When we start with intsets, we should always end with intsets. 
- if {$type eq {intset}} { - assert_encoding intset setres{t} - } - assert_equal {1 2 3 4} [lsort [r smembers setres{t}]] - } - - test "SINTER/SUNION/SDIFF with three same sets - $type" { - set expected [lsort "[r smembers set1{t}]"] - assert_equal $expected [lsort [r sinter set1{t} set1{t} set1{t}]] - assert_equal $expected [lsort [r sunion set1{t} set1{t} set1{t}]] - assert_equal {} [lsort [r sdiff set1{t} set1{t} set1{t}]] - } - } - - test "SINTERSTORE with two listpack sets where result is intset" { - r del setres{t} set1{t} set2{t} - r sadd set1{t} a b c 1 3 6 x y z - r sadd set2{t} e f g 1 2 3 u v w - assert_encoding listpack set1{t} - assert_encoding listpack set2{t} - r sinterstore setres{t} set1{t} set2{t} - assert_equal [list 1 3] [lsort [r smembers setres{t}]] - assert_encoding intset setres{t} - } - - test "SINTERSTORE with two hashtable sets where result is intset" { - r del setres{t} set1{t} set2{t} - r sadd set1{t} a b c 444 555 666 - r sadd set2{t} e f g 111 222 333 - set expected {} - for {set i 1} {$i < 130} {incr i} { - r sadd set1{t} $i - r sadd set2{t} $i - lappend expected $i - } - assert_encoding hashtable set1{t} - assert_encoding hashtable set2{t} - r sinterstore setres{t} set1{t} set2{t} - assert_equal [lsort $expected] [lsort [r smembers setres{t}]] - assert_encoding intset setres{t} - } - - test "SUNION hashtable and listpack" { - # This adds code coverage for adding a non-sds string to a hashtable set - # which already contains the string. - r del set1{t} set2{t} - set union {abcdefghijklmnopqrstuvwxyz1234567890 a b c 1 2 3} - create_set set1{t} $union - create_set set2{t} {a b c} - assert_encoding hashtable set1{t} - assert_encoding listpack set2{t} - assert_equal [lsort $union] [lsort [r sunion set1{t} set2{t}]] - } - - test "SDIFF with first set empty" { - r del set1{t} set2{t} set3{t} - r sadd set2{t} 1 2 3 4 - r sadd set3{t} a b c d - r sdiff set1{t} set2{t} set3{t} - } {} - - test "SDIFF with same set two times" { - r del set1 - r sadd set1 a b c 1 2 3 4 5 6 - r sdiff set1 set1 - } {} - - test "SDIFF fuzzing" { - for {set j 0} {$j < 100} {incr j} { - unset -nocomplain s - array set s {} - set args {} - set num_sets [expr {[randomInt 10]+1}] - for {set i 0} {$i < $num_sets} {incr i} { - set num_elements [randomInt 100] - r del set_$i{t} - lappend args set_$i{t} - while {$num_elements} { - set ele [randomValue] - r sadd set_$i{t} $ele - if {$i == 0} { - set s($ele) x - } else { - unset -nocomplain s($ele) - } - incr num_elements -1 - } - } - set result [lsort [r sdiff {*}$args]] - assert_equal $result [lsort [array names s]] - } - } - - test "SDIFF against non-set should throw error" { - # with an empty set - r set key1{t} x - assert_error "WRONGTYPE*" {r sdiff key1{t} noset{t}} - # different order - assert_error "WRONGTYPE*" {r sdiff noset{t} key1{t}} - - # with a legal set - r del set1{t} - r sadd set1{t} a b c - assert_error "WRONGTYPE*" {r sdiff key1{t} set1{t}} - # different order - assert_error "WRONGTYPE*" {r sdiff set1{t} key1{t}} - } - - test "SDIFF should handle non existing key as empty" { - r del set1{t} set2{t} set3{t} - - r sadd set1{t} a b c - r sadd set2{t} b c d - assert_equal {a} [lsort [r sdiff set1{t} set2{t} set3{t}]] - assert_equal {} [lsort [r sdiff set3{t} set2{t} set1{t}]] - } - - test "SDIFFSTORE against non-set should throw error" { - r del set1{t} set2{t} set3{t} key1{t} - r set key1{t} x - - # with en empty dstkey - assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} noset{t}} - assert_equal 0 [r exists set3{t}] - 
assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} noset{t} key1{t}} - assert_equal 0 [r exists set3{t}] - - # with a legal dstkey - r sadd set1{t} a b c - r sadd set2{t} b c d - r sadd set3{t} e - assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} set1{t} noset{t}} - assert_equal 1 [r exists set3{t}] - assert_equal {e} [lsort [r smembers set3{t}]] - - assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} set1{t} key1{t} set2{t}} - assert_equal 1 [r exists set3{t}] - assert_equal {e} [lsort [r smembers set3{t}]] - } - - test "SDIFFSTORE should handle non existing key as empty" { - r del set1{t} set2{t} set3{t} - - r set setres{t} xxx - assert_equal 0 [r sdiffstore setres{t} foo111{t} bar222{t}] - assert_equal 0 [r exists setres{t}] - - # with a legal dstkey, should delete dstkey - r sadd set3{t} a b c - assert_equal 0 [r sdiffstore set3{t} set1{t} set2{t}] - assert_equal 0 [r exists set3{t}] - - r sadd set1{t} a b c - assert_equal 3 [r sdiffstore set3{t} set1{t} set2{t}] - assert_equal 1 [r exists set3{t}] - assert_equal {a b c} [lsort [r smembers set3{t}]] - - # with a legal dstkey and empty set2, should delete the dstkey - r sadd set3{t} a b c - assert_equal 0 [r sdiffstore set3{t} set2{t} set1{t}] - assert_equal 0 [r exists set3{t}] - } - - test "SINTER against non-set should throw error" { - r set key1{t} x - assert_error "WRONGTYPE*" {r sinter key1{t} noset{t}} - # different order - assert_error "WRONGTYPE*" {r sinter noset{t} key1{t}} - - r sadd set1{t} a b c - assert_error "WRONGTYPE*" {r sinter key1{t} set1{t}} - # different order - assert_error "WRONGTYPE*" {r sinter set1{t} key1{t}} - } - - test "SINTER should handle non existing key as empty" { - r del set1{t} set2{t} set3{t} - r sadd set1{t} a b c - r sadd set2{t} b c d - r sinter set1{t} set2{t} set3{t} - } {} - - test "SINTER with same integer elements but different encoding" { - r del set1{t} set2{t} - r sadd set1{t} 1 2 3 - r sadd set2{t} 1 2 3 a - r srem set2{t} a - assert_encoding intset set1{t} - assert_encoding listpack set2{t} - lsort [r sinter set1{t} set2{t}] - } {1 2 3} - - test "SINTERSTORE against non-set should throw error" { - r del set1{t} set2{t} set3{t} key1{t} - r set key1{t} x - - # with en empty dstkey - assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} noset{t}} - assert_equal 0 [r exists set3{t}] - assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t}} - assert_equal 0 [r exists set3{t}] - - # with a legal dstkey - r sadd set1{t} a b c - r sadd set2{t} b c d - r sadd set3{t} e - assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} set2{t} noset{t}} - assert_equal 1 [r exists set3{t}] - assert_equal {e} [lsort [r smembers set3{t}]] - - assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t} set2{t}} - assert_equal 1 [r exists set3{t}] - assert_equal {e} [lsort [r smembers set3{t}]] - } - - test "SINTERSTORE against non existing keys should delete dstkey" { - r del set1{t} set2{t} set3{t} - - r set setres{t} xxx - assert_equal 0 [r sinterstore setres{t} foo111{t} bar222{t}] - assert_equal 0 [r exists setres{t}] - - # with a legal dstkey - r sadd set3{t} a b c - assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}] - assert_equal 0 [r exists set3{t}] - - r sadd set1{t} a b c - assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}] - assert_equal 0 [r exists set3{t}] - - assert_equal 0 [r sinterstore set3{t} set2{t} set1{t}] - assert_equal 0 [r exists set3{t}] - } - - test "SUNION against non-set should throw error" { - r set key1{t} x - assert_error "WRONGTYPE*" {r sunion 
key1{t} noset{t}} - # different order - assert_error "WRONGTYPE*" {r sunion noset{t} key1{t}} - - r del set1{t} - r sadd set1{t} a b c - assert_error "WRONGTYPE*" {r sunion key1{t} set1{t}} - # different order - assert_error "WRONGTYPE*" {r sunion set1{t} key1{t}} - } - - test "SUNION should handle non existing key as empty" { - r del set1{t} set2{t} set3{t} - - r sadd set1{t} a b c - r sadd set2{t} b c d - assert_equal {a b c d} [lsort [r sunion set1{t} set2{t} set3{t}]] - } - - test "SUNIONSTORE against non-set should throw error" { - r del set1{t} set2{t} set3{t} key1{t} - r set key1{t} x - - # with en empty dstkey - assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} noset{t}} - assert_equal 0 [r exists set3{t}] - assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t}} - assert_equal 0 [r exists set3{t}] - - # with a legal dstkey - r sadd set1{t} a b c - r sadd set2{t} b c d - r sadd set3{t} e - assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} key2{t} noset{t}} - assert_equal 1 [r exists set3{t}] - assert_equal {e} [lsort [r smembers set3{t}]] - - assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t} key2{t}} - assert_equal 1 [r exists set3{t}] - assert_equal {e} [lsort [r smembers set3{t}]] - } - - test "SUNIONSTORE should handle non existing key as empty" { - r del set1{t} set2{t} set3{t} - - r set setres{t} xxx - assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}] - assert_equal 0 [r exists setres{t}] - - # set1 set2 both empty, should delete the dstkey - r sadd set3{t} a b c - assert_equal 0 [r sunionstore set3{t} set1{t} set2{t}] - assert_equal 0 [r exists set3{t}] - - r sadd set1{t} a b c - r sadd set3{t} e f - assert_equal 3 [r sunionstore set3{t} set1{t} set2{t}] - assert_equal 1 [r exists set3{t}] - assert_equal {a b c} [lsort [r smembers set3{t}]] - - r sadd set3{t} d - assert_equal 3 [r sunionstore set3{t} set2{t} set1{t}] - assert_equal 1 [r exists set3{t}] - assert_equal {a b c} [lsort [r smembers set3{t}]] - } - - test "SUNIONSTORE against non existing keys should delete dstkey" { - r set setres{t} xxx - assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}] - assert_equal 0 [r exists setres{t}] - } - - foreach {type contents} {listpack {a b c} intset {1 2 3}} { - test "SPOP basics - $type" { - create_set myset $contents - assert_encoding $type myset - assert_equal $contents [lsort [list [r spop myset] [r spop myset] [r spop myset]]] - assert_equal 0 [r scard myset] - } - - test "SPOP with =1 - $type" { - create_set myset $contents - assert_encoding $type myset - assert_equal $contents [lsort [list [r spop myset 1] [r spop myset 1] [r spop myset 1]]] - assert_equal 0 [r scard myset] - } - - test "SRANDMEMBER - $type" { - create_set myset $contents - unset -nocomplain myset - array set myset {} - for {set i 0} {$i < 100} {incr i} { - set myset([r srandmember myset]) 1 - } - assert_equal $contents [lsort [array names myset]] - } - } - - test "SPOP integer from listpack set" { - create_set myset {a 1 2 3 4 5 6 7} - assert_encoding listpack myset - set a [r spop myset] - set b [r spop myset] - assert {[string is digit $a] || [string is digit $b]} - } - - foreach {type contents} { - listpack {a b c d e f g h i j k l m n o p q r s t u v w x y z} - intset {1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 3 4 5 6 7 8 9} - hashtable {ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 b c d e f g h i j k l m n o p q r s t u v w x y z} - } { - test "SPOP with - $type" { - create_set myset $contents - assert_encoding $type myset - 
assert_equal $contents [lsort [concat [r spop myset 11] [r spop myset 9] [r spop myset 0] [r spop myset 4] [r spop myset 1] [r spop myset 0] [r spop myset 1] [r spop myset 0]]] - assert_equal 0 [r scard myset] - } - } - - # As seen in intsetRandomMembers - test "SPOP using integers, testing Knuth's and Floyd's algorithm" { - create_set myset {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} - assert_encoding intset myset - assert_equal 20 [r scard myset] - r spop myset 1 - assert_equal 19 [r scard myset] - r spop myset 2 - assert_equal 17 [r scard myset] - r spop myset 3 - assert_equal 14 [r scard myset] - r spop myset 10 - assert_equal 4 [r scard myset] - r spop myset 10 - assert_equal 0 [r scard myset] - r spop myset 1 - assert_equal 0 [r scard myset] - } {} - - test "SPOP using integers with Knuth's algorithm" { - r spop nonexisting_key 100 - } {} - - foreach {type content} { - intset {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} - listpack {a 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20} - } { - test "SPOP new implementation: code path #1 $type" { - create_set myset $content - assert_encoding $type myset - set res [r spop myset 30] - assert {[lsort $content] eq [lsort $res]} - assert_equal {0} [r exists myset] - } - - test "SPOP new implementation: code path #2 $type" { - create_set myset $content - assert_encoding $type myset - set res [r spop myset 2] - assert {[llength $res] == 2} - assert {[r scard myset] == 18} - set union [concat [r smembers myset] $res] - assert {[lsort $union] eq [lsort $content]} - } - - test "SPOP new implementation: code path #3 $type" { - create_set myset $content - assert_encoding $type myset - set res [r spop myset 18] - assert {[llength $res] == 18} - assert {[r scard myset] == 2} - set union [concat [r smembers myset] $res] - assert {[lsort $union] eq [lsort $content]} - } - } - - test "SPOP new implementation: code path #1 propagate as DEL or UNLINK" { - r del myset1{t} myset2{t} - r sadd myset1{t} 1 2 3 4 5 - r sadd myset2{t} 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 - - set repl [attach_to_replication_stream] - - r config set lazyfree-lazy-server-del no - r spop myset1{t} [r scard myset1{t}] - r config set lazyfree-lazy-server-del yes - r spop myset2{t} [r scard myset2{t}] - assert_equal {0} [r exists myset1{t} myset2{t}] - - # Verify the propagate of DEL and UNLINK. 
- assert_replication_stream $repl { - {select *} - {del myset1{t}} - {unlink myset2{t}} - } - - close_replication_stream $repl - } {} {needs:repl} - - test "SRANDMEMBER count of 0 is handled correctly" { - r srandmember myset 0 - } {} - - test "SRANDMEMBER with against non existing key" { - r srandmember nonexisting_key 100 - } {} - - test "SRANDMEMBER count overflow" { - r sadd myset a - assert_error {*value is out of range*} {r srandmember myset -9223372036854775808} - } {} - - # Make sure we can distinguish between an empty array and a null response - r readraw 1 - - test "SRANDMEMBER count of 0 is handled correctly - emptyarray" { - r srandmember myset 0 - } {*0} - - test "SRANDMEMBER with against non existing key - emptyarray" { - r srandmember nonexisting_key 100 - } {*0} - - r readraw 0 - - foreach {type contents} { - listpack { - 1 5 10 50 125 50000 33959417 4775547 65434162 - 12098459 427716 483706 2726473884 72615637475 - MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA - SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN - SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH - KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA - BRENDA AMY ANNA REBECCA VIRGINIA KATHLEEN - } - intset { - 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 33 34 35 36 37 38 39 - 40 41 42 43 44 45 46 47 48 49 - } - hashtable { - ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 - 1 5 10 50 125 50000 33959417 4775547 65434162 - 12098459 427716 483706 2726473884 72615637475 - MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA - SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN - SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH - KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA - BRENDA AMY ANNA REBECCA VIRGINIA - } - } { - test "SRANDMEMBER with - $type" { - create_set myset $contents - assert_encoding $type myset - unset -nocomplain myset - array set myset {} - foreach ele [r smembers myset] { - set myset($ele) 1 - } - assert_equal [lsort $contents] [lsort [array names myset]] - - # Make sure that a count of 0 is handled correctly. - assert_equal [r srandmember myset 0] {} - - # We'll stress different parts of the code, see the implementation - # of SRANDMEMBER for more information, but basically there are - # four different code paths. - # - # PATH 1: Use negative count. - # - # 1) Check that it returns repeated elements. - set res [r srandmember myset -100] - assert_equal [llength $res] 100 - - # 2) Check that all the elements actually belong to the - # original set. - foreach ele $res { - assert {[info exists myset($ele)]} - } - - # 3) Check that eventually all the elements are returned. - unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset -10] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names myset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - - # PATH 2: positive count (unique behavior) with requested size - # equal or greater than set size. - foreach size {50 100} { - set res [r srandmember myset $size] - assert_equal [llength $res] 50 - assert_equal [lsort $res] [lsort [array names myset]] - } - - # PATH 3: Ask almost as elements as there are in the set. - # In this case the implementation will duplicate the original - # set and will remove random elements up to the requested size. - # - # PATH 4: Ask a number of elements definitely smaller than - # the set size. 
- # - # We can test both the code paths just changing the size but - # using the same code. - - foreach size {45 5} { - set res [r srandmember myset $size] - assert_equal [llength $res] $size - - # 1) Check that all the elements actually belong to the - # original set. - foreach ele $res { - assert {[info exists myset($ele)]} - } - - # 2) Check that eventually all the elements are returned. - unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset $size] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names myset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - } - } - } - - foreach {type contents} { - listpack { - 1 5 10 50 125 - MARY PATRICIA LINDA BARBARA ELIZABETH - } - intset { - 0 1 2 3 4 5 6 7 8 9 - } - hashtable { - ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 - 1 5 10 50 125 - MARY PATRICIA LINDA BARBARA - } - } { - test "SRANDMEMBER histogram distribution - $type" { - create_set myset $contents - assert_encoding $type myset - unset -nocomplain myset - array set myset {} - foreach ele [r smembers myset] { - set myset($ele) 1 - } - - # Use negative count (PATH 1). - # df = 9, 40 means 0.00001 probability - set res [r srandmember myset -1000] - assert_lessthan [chi_square_value $res] 40 - - # Use positive count (both PATH 3 and PATH 4). - foreach size {8 2} { - unset -nocomplain allkey - set iterations [expr {1000 / $size}] - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset $size] - foreach ele $res { - lappend allkey $ele - } - } - # df = 9, 40 means 0.00001 probability - assert_lessthan [chi_square_value $allkey] 40 - } - } - } - - proc is_rehashing {myset} { - set htstats [r debug HTSTATS-KEY $myset] - return [string match {*rehashing target*} $htstats] - } - - proc rem_hash_set_top_N {myset n} { - set cursor 0 - set members {} - set enough 0 - while 1 { - set res [r sscan $myset $cursor] - set cursor [lindex $res 0] - set k [lindex $res 1] - foreach m $k { - lappend members $m - if {[llength $members] >= $n} { - set enough 1 - break - } - } - if {$enough || $cursor == 0} { - break - } - } - r srem $myset {*}$members - } - - proc verify_rehashing_completed_key {myset table_size keys} { - set htstats [r debug HTSTATS-KEY $myset] - assert {![string match {*rehashing target*} $htstats]} - return {[string match {*table size: $table_size*number of elements: $keys*} $htstats]} - } - - test "SRANDMEMBER with a dict containing long chain" { - set origin_save [config_get_set save ""] - set origin_max_lp [config_get_set set-max-listpack-entries 0] - set origin_save_delay [config_get_set rdb-key-save-delay 2147483647] - - # 1) Create a hash set with 100000 members. - set members {} - for {set i 0} {$i < 100000} {incr i} { - lappend members [format "m:%d" $i] - } - create_set myset $members - - # 2) Wait for the hash set rehashing to finish. - while {[is_rehashing myset]} { - r srandmember myset 100 - } - - # 3) Turn off the rehashing of this set, and remove the members to 500. - r bgsave - rem_hash_set_top_N myset [expr {[r scard myset] - 500}] - assert_equal [r scard myset] 500 - - # 4) Kill RDB child process to restart rehashing. 
- set pid1 [get_child_pid 0] - catch {exec kill -9 $pid1} - waitForBgsave r - - # 5) Let the set hash to start rehashing - r spop myset 1 - assert [is_rehashing myset] - - # 6) Verify that when rdb saving is in progress, rehashing will still be performed (because - # the ratio is extreme) by waiting for it to finish during an active bgsave. - r bgsave - - while {[is_rehashing myset]} { - r srandmember myset 1 - } - if {$::verbose} { - puts [r debug HTSTATS-KEY myset full] - } - - set pid1 [get_child_pid 0] - catch {exec kill -9 $pid1} - waitForBgsave r - - # 7) Check that eventually, SRANDMEMBER returns all elements. - array set allmyset {} - foreach ele [r smembers myset] { - set allmyset($ele) 1 - } - unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset -10] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names allmyset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - - # 8) Remove the members to 30 in order to calculate the value of Chi-Square Distribution, - # otherwise we would need more iterations. - rem_hash_set_top_N myset [expr {[r scard myset] - 30}] - assert_equal [r scard myset] 30 - - # Hash set rehashing would be completed while removing members from the `myset` - # We also check the size and members in the hash table. - verify_rehashing_completed_key myset 64 30 - - # Now that we have a hash set with only one long chain bucket. - set htstats [r debug HTSTATS-KEY myset full] - assert {[regexp {different slots: ([0-9]+)} $htstats - different_slots]} - assert {[regexp {max chain length: ([0-9]+)} $htstats - max_chain_length]} - assert {$different_slots == 1 && $max_chain_length == 30} - - # 9) Use positive count (PATH 4) to get 10 elements (out of 30) each time. 
- unset -nocomplain allkey - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset 10] - foreach ele $res { - lappend allkey $ele - } - } - # validate even distribution of random sampling (df = 29, 73 means 0.00001 probability) - assert_lessthan [chi_square_value $allkey] 73 - - r config set save $origin_save - r config set set-max-listpack-entries $origin_max_lp - r config set rdb-key-save-delay $origin_save_delay - } {OK} {needs:debug slow} - - proc setup_move {} { - r del myset3{t} myset4{t} - create_set myset1{t} {1 a b} - create_set myset2{t} {2 3 4} - assert_encoding listpack myset1{t} - assert_encoding intset myset2{t} - } - - test "SMOVE basics - from regular set to intset" { - # move a non-integer element to an intset should convert encoding - setup_move - assert_equal 1 [r smove myset1{t} myset2{t} a] - assert_equal {1 b} [lsort [r smembers myset1{t}]] - assert_equal {2 3 4 a} [lsort [r smembers myset2{t}]] - assert_encoding listpack myset2{t} - - # move an integer element should not convert the encoding - setup_move - assert_equal 1 [r smove myset1{t} myset2{t} 1] - assert_equal {a b} [lsort [r smembers myset1{t}]] - assert_equal {1 2 3 4} [lsort [r smembers myset2{t}]] - assert_encoding intset myset2{t} - } - - test "SMOVE basics - from intset to regular set" { - setup_move - assert_equal 1 [r smove myset2{t} myset1{t} 2] - assert_equal {1 2 a b} [lsort [r smembers myset1{t}]] - assert_equal {3 4} [lsort [r smembers myset2{t}]] - } - - test "SMOVE non existing key" { - setup_move - assert_equal 0 [r smove myset1{t} myset2{t} foo] - assert_equal 0 [r smove myset1{t} myset1{t} foo] - assert_equal {1 a b} [lsort [r smembers myset1{t}]] - assert_equal {2 3 4} [lsort [r smembers myset2{t}]] - } - - test "SMOVE non existing src set" { - setup_move - assert_equal 0 [r smove noset{t} myset2{t} foo] - assert_equal {2 3 4} [lsort [r smembers myset2{t}]] - } - - test "SMOVE from regular set to non existing destination set" { - setup_move - assert_equal 1 [r smove myset1{t} myset3{t} a] - assert_equal {1 b} [lsort [r smembers myset1{t}]] - assert_equal {a} [lsort [r smembers myset3{t}]] - assert_encoding listpack myset3{t} - } - - test "SMOVE from intset to non existing destination set" { - setup_move - assert_equal 1 [r smove myset2{t} myset3{t} 2] - assert_equal {3 4} [lsort [r smembers myset2{t}]] - assert_equal {2} [lsort [r smembers myset3{t}]] - assert_encoding intset myset3{t} - } - - test "SMOVE wrong src key type" { - r set x{t} 10 - assert_error "WRONGTYPE*" {r smove x{t} myset2{t} foo} - } - - test "SMOVE wrong dst key type" { - r set x{t} 10 - assert_error "WRONGTYPE*" {r smove myset2{t} x{t} foo} - } - - test "SMOVE with identical source and destination" { - r del set{t} - r sadd set{t} a b c - r smove set{t} set{t} b - lsort [r smembers set{t}] - } {a b c} - - test "SMOVE only notify dstset when the addition is successful" { - r del srcset{t} - r del dstset{t} - - r sadd srcset{t} a b - r sadd dstset{t} a - - r watch dstset{t} - - r multi - r sadd dstset{t} c - - set r2 [redis_client] - $r2 smove srcset{t} dstset{t} a - - # The dstset is actually unchanged, multi should success - r exec - set res [r scard dstset{t}] - assert_equal $res 2 - $r2 close - } - - tags {slow} { - test {intsets implementation stress testing} { - for {set j 0} {$j < 20} {incr j} { - unset -nocomplain s - array set s {} - r del s - set len [randomInt 1024] - for {set i 0} {$i < $len} {incr i} { - randpath { - set data [randomInt 65536] - } { - set data 
[randomInt 4294967296] - } { - set data [randomInt 18446744073709551616] - } - set s($data) {} - r sadd s $data - } - assert_equal [lsort [r smembers s]] [lsort [array names s]] - set len [array size s] - for {set i 0} {$i < $len} {incr i} { - set e [r spop s] - if {![info exists s($e)]} { - puts "Can't find '$e' on local array" - puts "Local array: [lsort [r smembers s]]" - puts "Remote array: [lsort [array names s]]" - error "exception" - } - array unset s $e - } - assert_equal [r scard s] 0 - assert_equal [array size s] 0 - } - } - } -} - -run_solo {set-large-memory} { -start_server [list overrides [list save ""] ] { - -# test if the server supports such large configs (avoid 32 bit builds) -catch { - r config set proto-max-bulk-len 10000000000 ;#10gb - r config set client-query-buffer-limit 10000000000 ;#10gb -} -if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} { - - set str_length 4400000000 ;#~4.4GB - - test {SADD, SCARD, SISMEMBER - large data} { - r flushdb - r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n" - assert_equal 1 [write_big_bulk $str_length "aaa"] - r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n" - assert_equal 1 [write_big_bulk $str_length "bbb"] - r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n" - assert_equal 0 [write_big_bulk $str_length "aaa"] - assert_encoding hashtable myset - set s0 [s used_memory] - assert {$s0 > [expr $str_length * 2]} - assert_equal 2 [r scard myset] - - r write "*3\r\n\$9\r\nSISMEMBER\r\n\$5\r\nmyset\r\n" - assert_equal 1 [write_big_bulk $str_length "aaa"] - r write "*3\r\n\$9\r\nSISMEMBER\r\n\$5\r\nmyset\r\n" - assert_equal 0 [write_big_bulk $str_length "ccc"] - r write "*3\r\n\$4\r\nSREM\r\n\$5\r\nmyset\r\n" - assert_equal 1 [write_big_bulk $str_length "bbb"] - assert_equal [read_big_bulk {r spop myset} yes "aaa"] $str_length - } {} {large-memory} - - # restore defaults - r config set proto-max-bulk-len 536870912 - r config set client-query-buffer-limit 1073741824 - -} ;# skip 32bit builds -} -} ;# run_solo \ No newline at end of file diff --git a/tests/unit/type/string.tcl b/tests/unit/type/string.tcl deleted file mode 100644 index 49615e8aa..000000000 --- a/tests/unit/type/string.tcl +++ /dev/null @@ -1,674 +0,0 @@ -start_server {tags {"string"}} { - test {SET and GET an item} { - r set x foobar - r get x - } {foobar} - - test {SET and GET an empty item} { - r set x {} - r get x - } {} - - test {Very big payload in GET/SET} { - set buf [string repeat "abcd" 1000000] - r set foo $buf - r get foo - } [string repeat "abcd" 1000000] - - tags {"slow"} { - test {Very big payload random access} { - set err {} - array set payload {} - for {set j 0} {$j < 100} {incr j} { - set size [expr 1+[randomInt 100000]] - set buf [string repeat "pl-$j" $size] - set payload($j) $buf - r set bigpayload_$j $buf - } - for {set j 0} {$j < 1000} {incr j} { - set index [randomInt 100] - set buf [r get bigpayload_$index] - if {$buf != $payload($index)} { - set err "Values differ: I set '$payload($index)' but I read back '$buf'" - break - } - } - unset payload - set _ $err - } {} - - test {SET 10000 numeric keys and access all them in reverse order} { - r flushdb - set err {} - for {set x 0} {$x < 10000} {incr x} { - r set $x $x - } - set sum 0 - for {set x 9999} {$x >= 0} {incr x -1} { - set val [r get $x] - if {$val ne $x} { - set err "Element at position $x is $val instead of $x" - break - } - } - set _ $err - } {} - - test {DBSIZE should be 10000 now} { - r dbsize - } {10000} - } - - test "SETNX target key missing" { - r del novar - assert_equal 1 [r 
setnx novar foobared] - assert_equal "foobared" [r get novar] - } - - test "SETNX target key exists" { - r set novar foobared - assert_equal 0 [r setnx novar blabla] - assert_equal "foobared" [r get novar] - } - - test "SETNX against not-expired volatile key" { - r set x 10 - r expire x 10000 - assert_equal 0 [r setnx x 20] - assert_equal 10 [r get x] - } - - test "SETNX against expired volatile key" { - # Make it very unlikely for the key this test uses to be expired by the - # active expiry cycle. This is tightly coupled to the implementation of - # active expiry and dbAdd() but currently the only way to test that - # SETNX expires a key when it should have been. - for {set x 0} {$x < 9999} {incr x} { - r setex key-$x 3600 value - } - - # This will be one of 10000 expiring keys. A cycle is executed every - # 100ms, sampling 10 keys for being expired or not. This key will be - # expired for at most 1s when we wait 2s, resulting in a total sample - # of 100 keys. The probability of the success of this test being a - # false positive is therefore approx. 1%. - r set x 10 - r expire x 1 - - # Wait for the key to expire - after 2000 - - assert_equal 1 [r setnx x 20] - assert_equal 20 [r get x] - } - - test "GETEX EX option" { - r del foo - r set foo bar - r getex foo ex 10 - assert_range [r ttl foo] 5 10 - } - - test "GETEX PX option" { - r del foo - r set foo bar - r getex foo px 10000 - assert_range [r pttl foo] 5000 10000 - } - - test "GETEX EXAT option" { - r del foo - r set foo bar - r getex foo exat [expr [clock seconds] + 10] - assert_range [r ttl foo] 5 10 - } - - test "GETEX PXAT option" { - r del foo - r set foo bar - r getex foo pxat [expr [clock milliseconds] + 10000] - assert_range [r pttl foo] 5000 10000 - } - - test "GETEX PERSIST option" { - r del foo - r set foo bar ex 10 - assert_range [r ttl foo] 5 10 - r getex foo persist - assert_equal -1 [r ttl foo] - } - - test "GETEX no option" { - r del foo - r set foo bar - r getex foo - assert_equal bar [r getex foo] - } - - test "GETEX syntax errors" { - set ex {} - catch {r getex foo non-existent-option} ex - set ex - } {*syntax*} - - test "GETEX and GET expired key or not exist" { - r del foo - r set foo bar px 1 - after 2 - assert_equal {} [r getex foo] - assert_equal {} [r get foo] - } - - test "GETEX no arguments" { - set ex {} - catch {r getex} ex - set ex - } {*wrong number of arguments for 'getex' command} - - test "GETDEL command" { - r del foo - r set foo bar - assert_equal bar [r getdel foo ] - assert_equal {} [r getdel foo ] - } - - test {GETDEL propagate as DEL command to replica} { - set repl [attach_to_replication_stream] - r set foo bar - r getdel foo - assert_replication_stream $repl { - {select *} - {set foo bar} - {del foo} - } - close_replication_stream $repl - } {} {needs:repl} - - test {GETEX without argument does not propagate to replica} { - set repl [attach_to_replication_stream] - r set foo bar - r getex foo - r del foo - assert_replication_stream $repl { - {select *} - {set foo bar} - {del foo} - } - close_replication_stream $repl - } {} {needs:repl} - - test {MGET} { - r flushdb - r set foo{t} BAR - r set bar{t} FOO - r mget foo{t} bar{t} - } {BAR FOO} - - test {MGET against non existing key} { - r mget foo{t} baazz{t} bar{t} - } {BAR {} FOO} - - test {MGET against non-string key} { - r sadd myset{t} ciao - r sadd myset{t} bau - r mget foo{t} baazz{t} bar{t} myset{t} - } {BAR {} FOO {}} - - test {GETSET (set new value)} { - r del foo - list [r getset foo xyz] [r get foo] - } {{} xyz} - - test {GETSET 
(replace old value)} { - r set foo bar - list [r getset foo xyz] [r get foo] - } {bar xyz} - - test {MSET base case} { - r mset x{t} 10 y{t} "foo bar" z{t} "x x x x x x x\n\n\r\n" - r mget x{t} y{t} z{t} - } [list 10 {foo bar} "x x x x x x x\n\n\r\n"] - - test {MSET/MSETNX wrong number of args} { - assert_error {*wrong number of arguments for 'mset' command} {r mset x{t} 10 y{t} "foo bar" z{t}} - assert_error {*wrong number of arguments for 'msetnx' command} {r msetnx x{t} 20 y{t} "foo bar" z{t}} - } - - test {MSET with already existing - same key twice} { - r set x{t} x - list [r mset x{t} xxx x{t} yyy] [r get x{t}] - } {OK yyy} - - test {MSETNX with already existent key} { - list [r msetnx x1{t} xxx y2{t} yyy x{t} 20] [r exists x1{t}] [r exists y2{t}] - } {0 0 0} - - test {MSETNX with not existing keys} { - list [r msetnx x1{t} xxx y2{t} yyy] [r get x1{t}] [r get y2{t}] - } {1 xxx yyy} - - test {MSETNX with not existing keys - same key twice} { - r del x1{t} - list [r msetnx x1{t} xxx x1{t} yyy] [r get x1{t}] - } {1 yyy} - - test {MSETNX with already existing keys - same key twice} { - list [r msetnx x1{t} xxx x1{t} zzz] [r get x1{t}] - } {0 yyy} - - test "STRLEN against non-existing key" { - assert_equal 0 [r strlen notakey] - } - - test "STRLEN against integer-encoded value" { - r set myinteger -555 - assert_equal 4 [r strlen myinteger] - } - - test "STRLEN against plain string" { - r set mystring "foozzz0123456789 baz" - assert_equal 20 [r strlen mystring] - } - - test "SETBIT against non-existing key" { - r del mykey - assert_equal 0 [r setbit mykey 1 1] - assert_equal [binary format B* 01000000] [r get mykey] - } - - test "SETBIT against string-encoded key" { - # Ascii "@" is integer 64 = 01 00 00 00 - r set mykey "@" - - assert_equal 0 [r setbit mykey 2 1] - assert_equal [binary format B* 01100000] [r get mykey] - assert_equal 1 [r setbit mykey 1 0] - assert_equal [binary format B* 00100000] [r get mykey] - } - - test "SETBIT against integer-encoded key" { - # Ascii "1" is integer 49 = 00 11 00 01 - r set mykey 1 - assert_encoding int mykey - - assert_equal 0 [r setbit mykey 6 1] - assert_equal [binary format B* 00110011] [r get mykey] - assert_equal 1 [r setbit mykey 2 0] - assert_equal [binary format B* 00010011] [r get mykey] - } - - test "SETBIT against key with wrong type" { - r del mykey - r lpush mykey "foo" - assert_error "WRONGTYPE*" {r setbit mykey 0 1} - } - - test "SETBIT with out of range bit offset" { - r del mykey - assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1} - assert_error "*out of range*" {r setbit mykey -1 1} - } - - test "SETBIT with non-bit argument" { - r del mykey - assert_error "*out of range*" {r setbit mykey 0 -1} - assert_error "*out of range*" {r setbit mykey 0 2} - assert_error "*out of range*" {r setbit mykey 0 10} - assert_error "*out of range*" {r setbit mykey 0 20} - } - - test "SETBIT fuzzing" { - set str "" - set len [expr 256*8] - r del mykey - - for {set i 0} {$i < 2000} {incr i} { - set bitnum [randomInt $len] - set bitval [randomInt 2] - set fmt [format "%%-%ds%%d%%-s" $bitnum] - set head [string range $str 0 $bitnum-1] - set tail [string range $str $bitnum+1 end] - set str [string map {" " 0} [format $fmt $head $bitval $tail]] - - r setbit mykey $bitnum $bitval - assert_equal [binary format B* $str] [r get mykey] - } - } - - test "GETBIT against non-existing key" { - r del mykey - assert_equal 0 [r getbit mykey 0] - } - - test "GETBIT against string-encoded key" { - # Single byte with 2nd and 3rd bit set - r set 
mykey "`" - - # In-range - assert_equal 0 [r getbit mykey 0] - assert_equal 1 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 0 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - - test "GETBIT against integer-encoded key" { - r set mykey 1 - assert_encoding int mykey - - # Ascii "1" is integer 49 = 00 11 00 01 - assert_equal 0 [r getbit mykey 0] - assert_equal 0 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 1 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - - test "SETRANGE against non-existing key" { - r del mykey - assert_equal 3 [r setrange mykey 0 foo] - assert_equal "foo" [r get mykey] - - r del mykey - assert_equal 0 [r setrange mykey 0 ""] - assert_equal 0 [r exists mykey] - - r del mykey - assert_equal 4 [r setrange mykey 1 foo] - assert_equal "\000foo" [r get mykey] - } - - test "SETRANGE against string-encoded key" { - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 b] - assert_equal "boo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 ""] - assert_equal "foo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 1 b] - assert_equal "fbo" [r get mykey] - - r set mykey "foo" - assert_equal 7 [r setrange mykey 4 bar] - assert_equal "foo\000bar" [r get mykey] - } - - test "SETRANGE against integer-encoded key" { - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 0 2] - assert_encoding raw mykey - assert_equal 2234 [r get mykey] - - # Shouldn't change encoding when nothing is set - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 0 ""] - assert_encoding int mykey - assert_equal 1234 [r get mykey] - - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 1 3] - assert_encoding raw mykey - assert_equal 1334 [r get mykey] - - r set mykey 1234 - assert_encoding int mykey - assert_equal 6 [r setrange mykey 5 2] - assert_encoding raw mykey - assert_equal "1234\0002" [r get mykey] - } - - test "SETRANGE against key with wrong type" { - r del mykey - r lpush mykey "foo" - assert_error "WRONGTYPE*" {r setrange mykey 0 bar} - } - - test "SETRANGE with out of range offset" { - r del mykey - assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} - - r set mykey "hello" - assert_error "*out of range*" {r setrange mykey -1 world} - assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} - } - - test "GETRANGE against non-existing key" { - r del mykey - assert_equal "" [r getrange mykey 0 -1] - } - - test "GETRANGE against wrong key type" { - r lpush lkey1 "list" - assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r getrange lkey1 0 -1} - } - - test "GETRANGE against string value" { - r set mykey "Hello World" - assert_equal "Hell" [r getrange mykey 0 3] - assert_equal "Hello World" [r getrange mykey 0 -1] - assert_equal "orld" [r getrange mykey -4 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal " World" [r getrange mykey 5 5000] - assert_equal "Hello World" [r getrange mykey -5000 10000] - } - - test "GETRANGE against integer-encoded value" { - r set mykey 1234 - assert_equal "123" [r getrange mykey 0 2] - assert_equal "1234" [r getrange mykey 0 -1] - assert_equal "234" [r getrange mykey -3 -1] - 
assert_equal "" [r getrange mykey 5 3] - assert_equal "4" [r getrange mykey 3 5000] - assert_equal "1234" [r getrange mykey -5000 10000] - } - - test "GETRANGE fuzzing" { - for {set i 0} {$i < 1000} {incr i} { - r set bin [set bin [randstring 0 1024 binary]] - set _start [set start [randomInt 1500]] - set _end [set end [randomInt 1500]] - if {$_start < 0} {set _start "end-[abs($_start)-1]"} - if {$_end < 0} {set _end "end-[abs($_end)-1]"} - assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] - } - } - - test "Coverage: SUBSTR" { - r set key abcde - assert_equal "a" [r substr key 0 0] - assert_equal "abcd" [r substr key 0 3] - assert_equal "bcde" [r substr key -4 -1] - assert_equal "" [r substr key -1 -3] - assert_equal "" [r substr key 7 8] - assert_equal "" [r substr nokey 0 1] - } - -if {[string match {*jemalloc*} [s mem_allocator]]} { - test {trim on SET with big value} { - # set a big value to trigger increasing the query buf - r set key [string repeat A 100000] - # set a smaller value but > PROTO_MBULK_BIG_ARG (32*1024) Redis will try to save the query buf itself on the DB. - r set key [string repeat A 33000] - # asset the value was trimmed - assert {[r memory usage key] < 42000}; # 42K to count for Jemalloc's additional memory overhead. - } -} ;# if jemalloc - - test {Extended SET can detect syntax errors} { - set e {} - catch {r set foo bar non-existing-option} e - set e - } {*syntax*} - - test {Extended SET NX option} { - r del foo - set v1 [r set foo 1 nx] - set v2 [r set foo 2 nx] - list $v1 $v2 [r get foo] - } {OK {} 1} - - test {Extended SET XX option} { - r del foo - set v1 [r set foo 1 xx] - r set foo bar - set v2 [r set foo 2 xx] - list $v1 $v2 [r get foo] - } {{} OK 2} - - test {Extended SET GET option} { - r del foo - r set foo bar - set old_value [r set foo bar2 GET] - set new_value [r get foo] - list $old_value $new_value - } {bar bar2} - - test {Extended SET GET option with no previous value} { - r del foo - set old_value [r set foo bar GET] - set new_value [r get foo] - list $old_value $new_value - } {{} bar} - - test {Extended SET GET option with XX} { - r del foo - r set foo bar - set old_value [r set foo baz GET XX] - set new_value [r get foo] - list $old_value $new_value - } {bar baz} - - test {Extended SET GET option with XX and no previous value} { - r del foo - set old_value [r set foo bar GET XX] - set new_value [r get foo] - list $old_value $new_value - } {{} {}} - - test {Extended SET GET option with NX} { - r del foo - set old_value [r set foo bar GET NX] - set new_value [r get foo] - list $old_value $new_value - } {{} bar} - - test {Extended SET GET option with NX and previous value} { - r del foo - r set foo bar - set old_value [r set foo baz GET NX] - set new_value [r get foo] - list $old_value $new_value - } {bar bar} - - test {Extended SET GET with incorrect type should result in wrong type error} { - r del foo - r rpush foo waffle - catch {r set foo bar GET} err1 - assert_equal "waffle" [r rpop foo] - set err1 - } {*WRONGTYPE*} - - test {Extended SET EX option} { - r del foo - r set foo bar ex 10 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET PX option} { - r del foo - r set foo bar px 10000 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test "Extended SET EXAT option" { - r del foo - r set foo bar exat [expr [clock seconds] + 10] - assert_range [r ttl foo] 5 10 - } - - test "Extended SET PXAT option" { - r del foo - r set foo bar pxat [expr [clock milliseconds] + 10000] - 
assert_range [r ttl foo] 5 10 - } - test {Extended SET using multiple options at once} { - r set foo val - assert {[r set foo bar xx px 10000] eq {OK}} - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {GETRANGE with huge ranges, Github issue #1844} { - r set foo bar - r getrange foo 0 4294967297 - } {bar} - - set rna1 {CACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTTCGTCCGGGTGTG} - set rna2 {ATTAAAGGTTTATACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTT} - set rnalcs {ACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTT} - - test {LCS basic} { - r set virus1{t} $rna1 - r set virus2{t} $rna2 - r LCS virus1{t} virus2{t} - } $rnalcs - - test {LCS len} { - r set virus1{t} $rna1 - r set virus2{t} $rna2 - r LCS virus1{t} virus2{t} LEN - } [string length $rnalcs] - - test {LCS indexes} { - dict get [r LCS virus1{t} virus2{t} IDX] matches - } {{{238 238} {239 239}} {{236 236} {238 238}} {{229 230} {236 237}} {{224 224} {235 235}} {{1 222} {13 234}}} - - test {LCS indexes with match len} { - dict get [r LCS virus1{t} virus2{t} IDX WITHMATCHLEN] matches - } {{{238 238} {239 239} 1} {{236 236} {238 238} 1} {{229 230} {236 237} 2} {{224 224} {235 235} 1} {{1 222} {13 234} 222}} - - test {LCS indexes with match len and minimum match len} { - dict get [r LCS virus1{t} virus2{t} IDX WITHMATCHLEN MINMATCHLEN 5] matches - } {{{1 222} {13 234} 222}} - - test {SETRANGE with huge offset} { - foreach value {9223372036854775807 2147483647} { - catch {[r setrange K $value A]} res - # expecting a different error on 32 and 64 bit systems - if {![string match "*string exceeds maximum allowed size*" $res] && ![string match "*out of range*" $res]} { - assert_equal $res "expecting an error" - } - } - } - - test {APPEND modifies the encoding from int to raw} { - r del foo - r set foo 1 - assert_encoding "int" foo - r append foo 2 - - set res {} - lappend res [r get foo] - assert_encoding "raw" foo - - r set bar 12 - assert_encoding "int" bar - lappend res [r get bar] - } {12 12} -} \ No newline at end of file diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl deleted file mode 100644 index f23dfeb65..000000000 --- a/tests/unit/type/zset.tcl +++ /dev/null @@ -1,2735 +0,0 @@ -start_server {tags {"zset"}} { - proc create_zset {key items} { - r del $key - foreach {score entry} $items { - r zadd $key $score $entry - } - } - - # A helper function to verify either ZPOP* or ZMPOP* response. - proc verify_pop_response {pop res zpop_expected_response zmpop_expected_response} { - if {[string match "*ZM*" $pop]} { - assert_equal $res $zmpop_expected_response - } else { - assert_equal $res $zpop_expected_response - } - } - - # A helper function to verify either ZPOP* or ZMPOP* response when given one input key. 
- proc verify_zpop_response {rd pop key count zpop_expected_response zmpop_expected_response} { - if {[string match "ZM*" $pop]} { - lassign [split $pop "_"] pop where - - if {$count == 0} { - set res [$rd $pop 1 $key $where] - } else { - set res [$rd $pop 1 $key $where COUNT $count] - } - } else { - if {$count == 0} { - set res [$rd $pop $key] - } else { - set res [$rd $pop $key $count] - } - } - verify_pop_response $pop $res $zpop_expected_response $zmpop_expected_response - } - - # A helper function to verify either BZPOP* or BZMPOP* response when given one input key. - proc verify_bzpop_response {rd pop key timeout count bzpop_expected_response bzmpop_expected_response} { - if {[string match "BZM*" $pop]} { - lassign [split $pop "_"] pop where - - if {$count == 0} { - $rd $pop $timeout 1 $key $where - } else { - $rd $pop $timeout 1 $key $where COUNT $count - } - } else { - $rd $pop $key $timeout - } - verify_pop_response $pop [$rd read] $bzpop_expected_response $bzmpop_expected_response - } - - # A helper function to verify either ZPOP* or ZMPOP* response when given two input keys. - proc verify_bzpop_two_key_response {rd pop key key2 timeout count bzpop_expected_response bzmpop_expected_response} { - if {[string match "BZM*" $pop]} { - lassign [split $pop "_"] pop where - - if {$count == 0} { - $rd $pop $timeout 2 $key $key2 $where - } else { - $rd $pop $timeout 2 $key $key2 $where COUNT $count - } - } else { - $rd $pop $key $key2 $timeout - } - verify_pop_response $pop [$rd read] $bzpop_expected_response $bzmpop_expected_response - } - - # A helper function to execute either BZPOP* or BZMPOP* with one input key. - proc bzpop_command {rd pop key timeout} { - if {[string match "BZM*" $pop]} { - lassign [split $pop "_"] pop where - $rd $pop $timeout 1 $key $where COUNT 1 - } else { - $rd $pop $key $timeout - } - } - - # A helper function to verify nil response in readraw base on RESP version. - proc verify_nil_response {resp nil_response} { - if {$resp == 2} { - assert_equal $nil_response {*-1} - } elseif {$resp == 3} { - assert_equal $nil_response {_} - } - } - - # A helper function to verify zset score response in readraw base on RESP version. 
- proc verify_score_response {rd resp score} { - if {$resp == 2} { - assert_equal [$rd read] {$1} - assert_equal [$rd read] $score - } elseif {$resp == 3} { - assert_equal [$rd read] ",$score" - } - } - - proc basics {encoding} { -# set original_max_entries [lindex [r config get zset-max-ziplist-entries] 1] -# set original_max_value [lindex [r config get zset-max-ziplist-value] 1] -# if {$encoding == "listpack"} { -# r config set zset-max-ziplist-entries 128 -# r config set zset-max-ziplist-value 64 -# } elseif {$encoding == "skiplist"} { -# r config set zset-max-ziplist-entries 0 -# r config set zset-max-ziplist-value 0 -# } else { -# puts "Unknown sorted set encoding" -# exit -# } - - test "Check encoding - $encoding" { - r del ztmp - r zadd ztmp 10 x - #assert_encoding $encoding ztmp - } - - test "ZSET basic ZADD and score update - $encoding" { - r del ztmp - r zadd ztmp 10 x - r zadd ztmp 20 y - r zadd ztmp 30 z - assert_equal {x y z} [r zrange ztmp 0 -1] - - r zadd ztmp 1 y - assert_equal {y x z} [r zrange ztmp 0 -1] - } - - test "ZSET element can't be set to NaN with ZADD - $encoding" { - assert_error "*not*float*" {r zadd myzset nan abc} - } - - test "ZSET element can't be set to NaN with ZINCRBY - $encoding" { - assert_error "*not*float*" {r zincrby myzset nan abc} - } - - test "ZADD with options syntax error with incomplete pair - $encoding" { - r del ztmp - catch {r zadd ztmp xx 10 x 20} err - set err - } {ERR*} - - test "ZADD XX option without key - $encoding" { - r del ztmp - assert {[r zadd ztmp xx 10 x] == 0} - assert {[r type ztmp] eq {none}} - } - - test "ZADD XX existing key - $encoding" { - r del ztmp - r zadd ztmp 10 x - assert {[r zadd ztmp xx 20 y] == 0} - assert {[r zcard ztmp] == 1} - } - - test "ZADD XX returns the number of elements actually added - $encoding" { - r del ztmp - r zadd ztmp 10 x - set retval [r zadd ztmp 10 x 20 y 30 z] - assert {$retval == 2} - } - - test "ZADD XX updates existing elements score - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - r zadd ztmp xx 5 foo 11 x 21 y 40 zap - assert {[r zcard ztmp] == 3} - assert {[r zscore ztmp x] == 11} - assert {[r zscore ztmp y] == 21} - } - - test "ZADD GT updates existing elements when new scores are greater - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - assert {[r zadd ztmp gt ch 5 foo 11 x 21 y 29 z] == 3} - assert {[r zcard ztmp] == 4} - assert {[r zscore ztmp x] == 11} - assert {[r zscore ztmp y] == 21} - assert {[r zscore ztmp z] == 30} - } - - test "ZADD LT updates existing elements when new scores are lower - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - assert {[r zadd ztmp lt ch 5 foo 11 x 21 y 29 z] == 2} - assert {[r zcard ztmp] == 4} - assert {[r zscore ztmp x] == 10} - assert {[r zscore ztmp y] == 20} - assert {[r zscore ztmp z] == 29} - } - - test "ZADD GT XX updates existing elements when new scores are greater and skips new elements - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - assert {[r zadd ztmp gt xx ch 5 foo 11 x 21 y 29 z] == 2} - assert {[r zcard ztmp] == 3} - assert {[r zscore ztmp x] == 11} - assert {[r zscore ztmp y] == 21} - assert {[r zscore ztmp z] == 30} - } - - test "ZADD LT XX updates existing elements when new scores are lower and skips new elements - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - assert {[r zadd ztmp lt xx ch 5 foo 11 x 21 y 29 z] == 1} - assert {[r zcard ztmp] == 3} - assert {[r zscore ztmp x] == 10} - assert {[r zscore ztmp y] == 20} - assert {[r zscore ztmp z] == 29} - } - - test "ZADD XX and NX 
are not compatible - $encoding" { - r del ztmp - catch {r zadd ztmp xx nx 10 x} err - set err - } {ERR*} - - test "ZADD NX with non existing key - $encoding" { - r del ztmp - r zadd ztmp nx 10 x 20 y 30 z - assert {[r zcard ztmp] == 3} - } - - test "ZADD NX only add new elements without updating old ones - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - assert {[r zadd ztmp nx 11 x 21 y 100 a 200 b] == 2} - assert {[r zscore ztmp x] == 10} - assert {[r zscore ztmp y] == 20} - assert {[r zscore ztmp a] == 100} - assert {[r zscore ztmp b] == 200} - } - - test "ZADD GT and NX are not compatible - $encoding" { - r del ztmp - catch {r zadd ztmp gt nx 10 x} err - set err - } {ERR*} - - test "ZADD LT and NX are not compatible - $encoding" { - r del ztmp - catch {r zadd ztmp lt nx 10 x} err - set err - } {ERR*} - - test "ZADD LT and GT are not compatible - $encoding" { - r del ztmp - catch {r zadd ztmp lt gt 10 x} err - set err - } {ERR*} - - test "ZADD INCR LT/GT replies with nill if score not updated - $encoding" { - r del ztmp - r zadd ztmp 28 x - assert {[r zadd ztmp lt incr 1 x] eq {}} - assert {[r zscore ztmp x] == 28} - assert {[r zadd ztmp gt incr -1 x] eq {}} - assert {[r zscore ztmp x] == 28} - } - - test "ZADD INCR LT/GT with inf - $encoding" { - r del ztmp - r zadd ztmp +inf x -inf y - - assert {[r zadd ztmp lt incr 1 x] eq {}} - assert {[r zscore ztmp x] == inf} - assert {[r zadd ztmp gt incr -1 x] eq {}} - assert {[r zscore ztmp x] == inf} - assert {[r zadd ztmp lt incr -1 x] eq {}} - assert {[r zscore ztmp x] == inf} - assert {[r zadd ztmp gt incr 1 x] eq {}} - assert {[r zscore ztmp x] == inf} - - assert {[r zadd ztmp lt incr 1 y] eq {}} - assert {[r zscore ztmp y] == -inf} - assert {[r zadd ztmp gt incr -1 y] eq {}} - assert {[r zscore ztmp y] == -inf} - assert {[r zadd ztmp lt incr -1 y] eq {}} - assert {[r zscore ztmp y] == -inf} - assert {[r zadd ztmp gt incr 1 y] eq {}} - assert {[r zscore ztmp y] == -inf} - } - - test "ZADD INCR works like ZINCRBY - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - r zadd ztmp INCR 15 x - assert {[r zscore ztmp x] == 25} - } - - test "ZADD INCR works with a single score-elemenet pair - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - catch {r zadd ztmp INCR 15 x 10 y} err - set err - } {ERR*} - - test "ZADD CH option changes return value to all changed elements - $encoding" { - r del ztmp - r zadd ztmp 10 x 20 y 30 z - assert {[r zadd ztmp 11 x 21 y 30 z] == 0} - assert {[r zadd ztmp ch 12 x 22 y 30 z] == 2} - } - - test "ZINCRBY calls leading to NaN result in error - $encoding" { - r zincrby myzset +inf abc - assert_error "*NaN*" {r zincrby myzset -inf abc} - } - - test "ZINCRBY against invalid incr value - $encoding" { - r del zincr - r zadd zincr 1 "one" - assert_error "*value is not a valid*" {r zincrby zincr v "one"} - } - - test "ZADD - Variadic version base case - $encoding" { - r del myzset - list [r zadd myzset 10 a 20 b 30 c] [r zrange myzset 0 -1 withscores] - } {3 {a 10 b 20 c 30}} - - test "ZADD - Return value is the number of actually added items - $encoding" { - list [r zadd myzset 5 x 20 b 30 c] [r zrange myzset 0 -1 withscores] - } {1 {x 5 a 10 b 20 c 30}} - - test "ZADD - Variadic version does not add nothing on single parsing err - $encoding" { - r del myzset - catch {r zadd myzset 10 a 20 b 30.badscore c} e - assert_match {*ERR*not*float*} $e - r exists myzset - } {0} - - test "ZADD - Variadic version will raise error on missing arg - $encoding" { - r del myzset - catch {r zadd myzset 10 a 20 b 30 c 
40} e - assert_match {*ERR*syntax*} $e - } - - test "ZINCRBY does not work variadic even if shares ZADD implementation - $encoding" { - r del myzset - catch {r zincrby myzset 10 a 20 b 30 c} e - assert_match {*ERR*wrong*number*arg*} $e - } - - test "ZCARD basics - $encoding" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - assert_equal 3 [r zcard ztmp] - assert_equal 0 [r zcard zdoesntexist] - } - - test "ZREM removes key after last element is removed - $encoding" { - r del ztmp - r zadd ztmp 10 x - r zadd ztmp 20 y - - assert_equal 1 [r exists ztmp] - assert_equal 0 [r zrem ztmp z] - assert_equal 1 [r zrem ztmp y] - assert_equal 1 [r zrem ztmp x] - assert_equal 0 [r exists ztmp] - } - - test "ZREM variadic version - $encoding" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - assert_equal 2 [r zrem ztmp x y a b k] - assert_equal 0 [r zrem ztmp foo bar] - assert_equal 1 [r zrem ztmp c] - r exists ztmp - } {0} - - test "ZREM variadic version -- remove elements after key deletion - $encoding" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - r zrem ztmp a b c d e f g - } {3} - - test "ZRANGE basics - $encoding" { - r del ztmp - r zadd ztmp 1 a - r zadd ztmp 2 b - r zadd ztmp 3 c - r zadd ztmp 4 d - - assert_equal {a b c d} [r zrange ztmp 0 -1] - assert_equal {a b c} [r zrange ztmp 0 -2] - assert_equal {b c d} [r zrange ztmp 1 -1] - assert_equal {b c} [r zrange ztmp 1 -2] - assert_equal {c d} [r zrange ztmp -2 -1] - assert_equal {c} [r zrange ztmp -2 -2] - - # out of range start index - assert_equal {a b c} [r zrange ztmp -5 2] - assert_equal {a b} [r zrange ztmp -5 1] - assert_equal {} [r zrange ztmp 5 -1] - assert_equal {} [r zrange ztmp 5 -2] - - # out of range end index - assert_equal {a b c d} [r zrange ztmp 0 5] - assert_equal {b c d} [r zrange ztmp 1 5] - assert_equal {} [r zrange ztmp 0 -5] - assert_equal {} [r zrange ztmp 1 -5] - - # withscores - assert_equal {a 1 b 2 c 3 d 4} [r zrange ztmp 0 -1 withscores] - } - - test "ZREVRANGE basics - $encoding" { - r del ztmp - r zadd ztmp 1 a - r zadd ztmp 2 b - r zadd ztmp 3 c - r zadd ztmp 4 d - - assert_equal {d c b a} [r zrevrange ztmp 0 -1] - assert_equal {d c b} [r zrevrange ztmp 0 -2] - assert_equal {c b a} [r zrevrange ztmp 1 -1] - assert_equal {c b} [r zrevrange ztmp 1 -2] - assert_equal {b a} [r zrevrange ztmp -2 -1] - assert_equal {b} [r zrevrange ztmp -2 -2] - - # out of range start index - assert_equal {d c b} [r zrevrange ztmp -5 2] - assert_equal {d c} [r zrevrange ztmp -5 1] - assert_equal {} [r zrevrange ztmp 5 -1] - assert_equal {} [r zrevrange ztmp 5 -2] - - # out of range end index - assert_equal {d c b a} [r zrevrange ztmp 0 5] - assert_equal {c b a} [r zrevrange ztmp 1 5] - assert_equal {} [r zrevrange ztmp 0 -5] - assert_equal {} [r zrevrange ztmp 1 -5] - - # withscores - assert_equal {d 4 c 3 b 2 a 1} [r zrevrange ztmp 0 -1 withscores] - } - - test "ZRANK/ZREVRANK basics - $encoding" { - set nullres {$-1} - if {$::force_resp3} { - set nullres {_} - } - r del zranktmp - r zadd zranktmp 10 x - r zadd zranktmp 20 y - r zadd zranktmp 30 z - assert_equal 0 [r zrank zranktmp x] - assert_equal 1 [r zrank zranktmp y] - assert_equal 2 [r zrank zranktmp z] - assert_equal 2 [r zrevrank zranktmp x] - assert_equal 1 [r zrevrank zranktmp y] - assert_equal 0 [r zrevrank zranktmp z] - r readraw 1 - assert_equal $nullres [r zrank zranktmp foo] - assert_equal $nullres [r zrevrank zranktmp foo] - r readraw 0 - - # withscore - set nullres {*-1} - if {$::force_resp3} { - set nullres {_} - } - assert_equal {0 10} [r zrank zranktmp x withscore] - 
assert_equal {1 20} [r zrank zranktmp y withscore] - assert_equal {2 30} [r zrank zranktmp z withscore] - assert_equal {2 10} [r zrevrank zranktmp x withscore] - assert_equal {1 20} [r zrevrank zranktmp y withscore] - assert_equal {0 30} [r zrevrank zranktmp z withscore] - r readraw 1 - assert_equal $nullres [r zrank zranktmp foo withscore] - assert_equal $nullres [r zrevrank zranktmp foo withscore] - r readraw 0 - } - - test "ZRANK - after deletion - $encoding" { - r zrem zranktmp y - assert_equal 0 [r zrank zranktmp x] - assert_equal 1 [r zrank zranktmp z] - assert_equal {0 10} [r zrank zranktmp x withscore] - assert_equal {1 30} [r zrank zranktmp z withscore] - } - - test "ZINCRBY - can create a new sorted set - $encoding" { - r del zset - r zincrby zset 1 foo - assert_equal {foo} [r zrange zset 0 -1] - assert_equal 1 [r zscore zset foo] - } - - test "ZINCRBY - increment and decrement - $encoding" { - r zincrby zset 2 foo - r zincrby zset 1 bar - assert_equal {bar foo} [r zrange zset 0 -1] - - r zincrby zset 10 bar - r zincrby zset -5 foo - r zincrby zset -5 bar - assert_equal {foo bar} [r zrange zset 0 -1] - - assert_equal -2 [r zscore zset foo] - assert_equal 6 [r zscore zset bar] - } - - test "ZINCRBY return value - $encoding" { - r del ztmp - set retval [r zincrby ztmp 1.0 x] - assert {$retval == 1.0} - } - - proc create_default_zset {} { - create_zset zset {-inf a 1 b 2 c 3 d 4 e 5 f +inf g} - } - - proc create_long_zset {key length} { - r del $key - for {set i 0} {$i < $length} {incr i 1} { - r zadd $key $i i$i - } - } - - test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics - $encoding" { - create_default_zset - - # inclusive range - assert_equal {a b c} [r zrangebyscore zset -inf 2] - assert_equal {b c d} [r zrangebyscore zset 0 3] - assert_equal {d e f} [r zrangebyscore zset 3 6] - assert_equal {e f g} [r zrangebyscore zset 4 +inf] - assert_equal {c b a} [r zrevrangebyscore zset 2 -inf] - assert_equal {d c b} [r zrevrangebyscore zset 3 0] - assert_equal {f e d} [r zrevrangebyscore zset 6 3] - assert_equal {g f e} [r zrevrangebyscore zset +inf 4] - assert_equal 3 [r zcount zset 0 3] - - # exclusive range - assert_equal {b} [r zrangebyscore zset (-inf (2] - assert_equal {b c} [r zrangebyscore zset (0 (3] - assert_equal {e f} [r zrangebyscore zset (3 (6] - assert_equal {f} [r zrangebyscore zset (4 (+inf] - assert_equal {b} [r zrevrangebyscore zset (2 (-inf] - assert_equal {c b} [r zrevrangebyscore zset (3 (0] - assert_equal {f e} [r zrevrangebyscore zset (6 (3] - assert_equal {f} [r zrevrangebyscore zset (+inf (4] - assert_equal 2 [r zcount zset (0 (3] - - # test empty ranges - r zrem zset a - r zrem zset g - - # inclusive - assert_equal {} [r zrangebyscore zset 4 2] - assert_equal {} [r zrangebyscore zset 6 +inf] - assert_equal {} [r zrangebyscore zset -inf -6] - assert_equal {} [r zrevrangebyscore zset +inf 6] - assert_equal {} [r zrevrangebyscore zset -6 -inf] - - # exclusive - assert_equal {} [r zrangebyscore zset (4 (2] - assert_equal {} [r zrangebyscore zset 2 (2] - assert_equal {} [r zrangebyscore zset (2 2] - assert_equal {} [r zrangebyscore zset (6 (+inf] - assert_equal {} [r zrangebyscore zset (-inf (-6] - assert_equal {} [r zrevrangebyscore zset (+inf (6] - assert_equal {} [r zrevrangebyscore zset (-6 (-inf] - - # empty inner range - assert_equal {} [r zrangebyscore zset 2.4 2.6] - assert_equal {} [r zrangebyscore zset (2.4 2.6] - assert_equal {} [r zrangebyscore zset 2.4 (2.6] - assert_equal {} [r zrangebyscore zset (2.4 (2.6] - } - - test "ZRANGEBYSCORE with WITHSCORES 
- $encoding" { - create_default_zset - assert_equal {b 1 c 2 d 3} [r zrangebyscore zset 0 3 withscores] - assert_equal {d 3 c 2 b 1} [r zrevrangebyscore zset 3 0 withscores] - } - - test "ZRANGEBYSCORE with LIMIT - $encoding" { - create_default_zset - assert_equal {b c} [r zrangebyscore zset 0 10 LIMIT 0 2] - assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 3] - assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 10] - assert_equal {} [r zrangebyscore zset 0 10 LIMIT 20 10] - assert_equal {f e} [r zrevrangebyscore zset 10 0 LIMIT 0 2] - assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 3] - assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 10] - assert_equal {} [r zrevrangebyscore zset 10 0 LIMIT 20 10] - # zrangebyscore uses different logic when offset > ZSKIPLIST_MAX_SEARCH - create_long_zset zset 30 - assert_equal {i12 i13 i14} [r zrangebyscore zset 0 20 LIMIT 12 3] - assert_equal {i14 i15} [r zrangebyscore zset 0 20 LIMIT 14 2] - assert_equal {i19 i20 i21} [r zrangebyscore zset 0 30 LIMIT 19 3] - assert_equal {i29} [r zrangebyscore zset 10 30 LIMIT 19 2] - assert_equal {i17 i16 i15} [r zrevrangebyscore zset 30 10 LIMIT 12 3] - assert_equal {i6 i5} [r zrevrangebyscore zset 20 0 LIMIT 14 2] - assert_equal {i2 i1 i0} [r zrevrangebyscore zset 20 0 LIMIT 18 5] - assert_equal {i0} [r zrevrangebyscore zset 20 0 LIMIT 20 5] - } - - test "ZRANGEBYSCORE with LIMIT and WITHSCORES - $encoding" { - create_default_zset - assert_equal {e 4 f 5} [r zrangebyscore zset 2 5 LIMIT 2 3 WITHSCORES] - assert_equal {d 3 c 2} [r zrevrangebyscore zset 5 2 LIMIT 2 3 WITHSCORES] - assert_equal {} [r zrangebyscore zset 2 5 LIMIT 12 13 WITHSCORES] - } - - test "ZRANGEBYSCORE with non-value min or max - $encoding" { - assert_error "*not*float*" {r zrangebyscore fooz str 1} - assert_error "*not*float*" {r zrangebyscore fooz 1 str} - assert_error "*not*float*" {r zrangebyscore fooz 1 NaN} - } - - proc create_default_lex_zset {} { - create_zset zset {0 alpha 0 bar 0 cool 0 down - 0 elephant 0 foo 0 great 0 hill - 0 omega} - } - - proc create_long_lex_zset {} { - create_zset zset {0 alpha 0 bar 0 cool 0 down - 0 elephant 0 foo 0 great 0 hill - 0 island 0 jacket 0 key 0 lip - 0 max 0 null 0 omega 0 point - 0 query 0 result 0 sea 0 tree} - } - - test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics - $encoding" { - create_default_lex_zset - - # inclusive range - assert_equal {alpha bar cool} [r zrangebylex zset - \[cool] - assert_equal {bar cool down} [r zrangebylex zset \[bar \[down] - assert_equal {great hill omega} [r zrangebylex zset \[g +] - assert_equal {cool bar alpha} [r zrevrangebylex zset \[cool -] - assert_equal {down cool bar} [r zrevrangebylex zset \[down \[bar] - assert_equal {omega hill great foo elephant down} [r zrevrangebylex zset + \[d] - assert_equal 3 [r zlexcount zset \[ele \[h] - - # exclusive range - assert_equal {alpha bar} [r zrangebylex zset - (cool] - assert_equal {cool} [r zrangebylex zset (bar (down] - assert_equal {hill omega} [r zrangebylex zset (great +] - assert_equal {bar alpha} [r zrevrangebylex zset (cool -] - assert_equal {cool} [r zrevrangebylex zset (down (bar] - assert_equal {omega hill} [r zrevrangebylex zset + (great] - assert_equal 2 [r zlexcount zset (ele (great] - - # inclusive and exclusive - assert_equal {} [r zrangebylex zset (az (b] - assert_equal {} [r zrangebylex zset (z +] - assert_equal {} [r zrangebylex zset - \[aaaa] - assert_equal {} [r zrevrangebylex zset \[elez \[elex] - assert_equal {} [r zrevrangebylex zset (hill (omega] - } - - test 
"ZLEXCOUNT advanced - $encoding" { - create_default_lex_zset - - assert_equal 9 [r zlexcount zset - +] - assert_equal 0 [r zlexcount zset + -] - assert_equal 0 [r zlexcount zset + \[c] - assert_equal 0 [r zlexcount zset \[c -] - assert_equal 8 [r zlexcount zset \[bar +] - assert_equal 5 [r zlexcount zset \[bar \[foo] - assert_equal 4 [r zlexcount zset \[bar (foo] - assert_equal 4 [r zlexcount zset (bar \[foo] - assert_equal 3 [r zlexcount zset (bar (foo] - assert_equal 5 [r zlexcount zset - (foo] - assert_equal 1 [r zlexcount zset (maxstring +] - } - - test "ZRANGEBYLEX with LIMIT - $encoding" { - create_default_lex_zset - assert_equal {alpha bar} [r zrangebylex zset - \[cool LIMIT 0 2] - assert_equal {bar cool} [r zrangebylex zset - \[cool LIMIT 1 2] - assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 0 0] - assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 2 0] - assert_equal {bar} [r zrangebylex zset \[bar \[down LIMIT 0 1] - assert_equal {cool} [r zrangebylex zset \[bar \[down LIMIT 1 1] - assert_equal {bar cool down} [r zrangebylex zset \[bar \[down LIMIT 0 100] - assert_equal {omega hill great foo elephant} [r zrevrangebylex zset + \[d LIMIT 0 5] - assert_equal {omega hill great foo} [r zrevrangebylex zset + \[d LIMIT 0 4] - assert_equal {great foo elephant} [r zrevrangebylex zset + \[d LIMIT 2 3] - # zrangebylex uses different logic when offset > ZSKIPLIST_MAX_SEARCH - create_long_lex_zset - assert_equal {max null} [r zrangebylex zset - \[tree LIMIT 12 2] - assert_equal {point query} [r zrangebylex zset - \[tree LIMIT 15 2] - assert_equal {} [r zrangebylex zset \[max \[tree LIMIT 10 0] - assert_equal {} [r zrangebylex zset \[max \[tree LIMIT 12 0] - assert_equal {max} [r zrangebylex zset \[max \[null LIMIT 0 1] - assert_equal {null} [r zrangebylex zset \[max \[null LIMIT 1 1] - assert_equal {max null omega point} [r zrangebylex zset \[max \[point LIMIT 0 100] - assert_equal {tree sea result query point} [r zrevrangebylex zset + \[o LIMIT 0 5] - assert_equal {tree sea result query} [r zrevrangebylex zset + \[o LIMIT 0 4] - assert_equal {omega null max lip} [r zrevrangebylex zset + \[l LIMIT 5 4] - assert_equal {elephant down} [r zrevrangebylex zset + \[a LIMIT 15 2] - assert_equal {bar alpha} [r zrevrangebylex zset + - LIMIT 18 6] - assert_equal {hill great foo} [r zrevrangebylex zset + \[c LIMIT 12 3] - } - - test "ZRANGEBYLEX with invalid lex range specifiers - $encoding" { - assert_error "*not*string*" {r zrangebylex fooz foo bar} - assert_error "*not*string*" {r zrangebylex fooz \[foo bar} - assert_error "*not*string*" {r zrangebylex fooz foo \[bar} - assert_error "*not*string*" {r zrangebylex fooz +x \[bar} - assert_error "*not*string*" {r zrangebylex fooz -x \[bar} - } - - test "ZREMRANGEBYSCORE basics - $encoding" { - proc remrangebyscore {min max} { - create_zset zset {1 a 2 b 3 c 4 d 5 e} - assert_equal 1 [r exists zset] - r zremrangebyscore zset $min $max - } - - # inner range - assert_equal 3 [remrangebyscore 2 4] - assert_equal {a e} [r zrange zset 0 -1] - - # start underflow - assert_equal 1 [remrangebyscore -10 1] - assert_equal {b c d e} [r zrange zset 0 -1] - - # end overflow - assert_equal 1 [remrangebyscore 5 10] - assert_equal {a b c d} [r zrange zset 0 -1] - - # switch min and max - assert_equal 0 [remrangebyscore 4 2] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # -inf to mid - assert_equal 3 [remrangebyscore -inf 3] - assert_equal {d e} [r zrange zset 0 -1] - - # mid to +inf - assert_equal 3 [remrangebyscore 3 +inf] - assert_equal {a b} [r 
zrange zset 0 -1] - - # -inf to +inf - assert_equal 5 [remrangebyscore -inf +inf] - assert_equal {} [r zrange zset 0 -1] - - # exclusive min - assert_equal 4 [remrangebyscore (1 5] - assert_equal {a} [r zrange zset 0 -1] - assert_equal 3 [remrangebyscore (2 5] - assert_equal {a b} [r zrange zset 0 -1] - - # exclusive max - assert_equal 4 [remrangebyscore 1 (5] - assert_equal {e} [r zrange zset 0 -1] - assert_equal 3 [remrangebyscore 1 (4] - assert_equal {d e} [r zrange zset 0 -1] - - # exclusive min and max - assert_equal 3 [remrangebyscore (1 (5] - assert_equal {a e} [r zrange zset 0 -1] - - # destroy when empty - assert_equal 5 [remrangebyscore 1 5] - assert_equal 0 [r exists zset] - } - - test "ZREMRANGEBYSCORE with non-value min or max - $encoding" { - assert_error "*not*float*" {r zremrangebyscore fooz str 1} - assert_error "*not*float*" {r zremrangebyscore fooz 1 str} - assert_error "*not*float*" {r zremrangebyscore fooz 1 NaN} - } - - test "ZREMRANGEBYRANK basics - $encoding" { - proc remrangebyrank {min max} { - create_zset zset {1 a 2 b 3 c 4 d 5 e} - assert_equal 1 [r exists zset] - r zremrangebyrank zset $min $max - } - - # inner range - assert_equal 3 [remrangebyrank 1 3] - assert_equal {a e} [r zrange zset 0 -1] - - # start underflow - assert_equal 1 [remrangebyrank -10 0] - assert_equal {b c d e} [r zrange zset 0 -1] - - # start overflow - assert_equal 0 [remrangebyrank 10 -1] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # end underflow - assert_equal 0 [remrangebyrank 0 -10] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # end overflow - assert_equal 5 [remrangebyrank 0 10] - assert_equal {} [r zrange zset 0 -1] - - # destroy when empty - assert_equal 5 [remrangebyrank 0 4] - assert_equal 0 [r exists zset] - } - - test "ZREMRANGEBYLEX basics - $encoding" { - proc remrangebylex {min max} { - create_default_lex_zset - assert_equal 1 [r exists zset] - r zremrangebylex zset $min $max - } - - # inclusive range - assert_equal 3 [remrangebylex - \[cool] - assert_equal {down elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 3 [remrangebylex \[bar \[down] - assert_equal {alpha elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 3 [remrangebylex \[g +] - assert_equal {alpha bar cool down elephant foo} [r zrange zset 0 -1] - assert_equal 6 [r zcard zset] - - # exclusive range - assert_equal 2 [remrangebylex - (cool] - assert_equal {cool down elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 1 [remrangebylex (bar (down] - assert_equal {alpha bar down elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 2 [remrangebylex (great +] - assert_equal {alpha bar cool down elephant foo great} [r zrange zset 0 -1] - assert_equal 7 [r zcard zset] - - # inclusive and exclusive - assert_equal 0 [remrangebylex (az (b] - assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 0 [remrangebylex (z +] - assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 0 [remrangebylex - \[aaaa] - assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1] - assert_equal 9 [r zcard zset] - - # destroy when empty - assert_equal 9 [remrangebylex - +] - assert_equal 0 [r zcard zset] - assert_equal 0 [r exists zset] - } - - test "ZUNIONSTORE against non-existing key doesn't set destination - $encoding" { - r del zseta{t} - assert_equal 0 [r zunionstore dst_key{t} 1 zseta{t}] - assert_equal 0 [r exists dst_key{t}] - } - - test 
"ZUNION/ZINTER/ZINTERCARD/ZDIFF against non-existing key - $encoding" { - r del zseta - assert_equal {} [r zunion 1 zseta] - assert_equal {} [r zinter 1 zseta] - assert_equal 0 [r zintercard 1 zseta] - assert_equal 0 [r zintercard 1 zseta limit 0] - assert_equal {} [r zdiff 1 zseta] - } - - test "ZUNIONSTORE with empty set - $encoding" { - r del zseta{t} zsetb{t} - r zadd zseta{t} 1 a - r zadd zseta{t} 2 b - r zunionstore zsetc{t} 2 zseta{t} zsetb{t} - r zrange zsetc{t} 0 -1 withscores - } {a 1 b 2} - - test "ZUNION/ZINTER/ZINTERCARD/ZDIFF with empty set - $encoding" { - r del zseta{t} zsetb{t} - r zadd zseta{t} 1 a - r zadd zseta{t} 2 b - assert_equal {a 1 b 2} [r zunion 2 zseta{t} zsetb{t} withscores] - assert_equal {} [r zinter 2 zseta{t} zsetb{t} withscores] - assert_equal 0 [r zintercard 2 zseta{t} zsetb{t}] - assert_equal 0 [r zintercard 2 zseta{t} zsetb{t} limit 0] - assert_equal {a 1 b 2} [r zdiff 2 zseta{t} zsetb{t} withscores] - } - - test "ZUNIONSTORE basics - $encoding" { - r del zseta{t} zsetb{t} zsetc{t} - r zadd zseta{t} 1 a - r zadd zseta{t} 2 b - r zadd zseta{t} 3 c - r zadd zsetb{t} 1 b - r zadd zsetb{t} 2 c - r zadd zsetb{t} 3 d - - assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t}] - assert_equal {a 1 b 3 d 3 c 5} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZUNION/ZINTER/ZINTERCARD/ZDIFF with integer members - $encoding" { - r del zsetd{t} zsetf{t} - r zadd zsetd{t} 1 1 - r zadd zsetd{t} 2 2 - r zadd zsetd{t} 3 3 - r zadd zsetf{t} 1 1 - r zadd zsetf{t} 3 3 - r zadd zsetf{t} 4 4 - - assert_equal {1 2 2 2 4 4 3 6} [r zunion 2 zsetd{t} zsetf{t} withscores] - assert_equal {1 2 3 6} [r zinter 2 zsetd{t} zsetf{t} withscores] - assert_equal 2 [r zintercard 2 zsetd{t} zsetf{t}] - assert_equal 2 [r zintercard 2 zsetd{t} zsetf{t} limit 0] - assert_equal {2 2} [r zdiff 2 zsetd{t} zsetf{t} withscores] - } - - test "ZUNIONSTORE with weights - $encoding" { - assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} weights 2 3] - assert_equal {a 2 b 7 d 9 c 12} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZUNION with weights - $encoding" { - assert_equal {a 2 b 7 d 9 c 12} [r zunion 2 zseta{t} zsetb{t} weights 2 3 withscores] - assert_equal {b 7 c 12} [r zinter 2 zseta{t} zsetb{t} weights 2 3 withscores] - } - - test "ZUNIONSTORE with a regular set and weights - $encoding" { - r del seta{t} - r sadd seta{t} a - r sadd seta{t} b - r sadd seta{t} c - - assert_equal 4 [r zunionstore zsetc{t} 2 seta{t} zsetb{t} weights 2 3] - assert_equal {a 2 b 5 c 8 d 9} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZUNIONSTORE with AGGREGATE MIN - $encoding" { - assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} aggregate min] - assert_equal {a 1 b 1 c 2 d 3} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZUNION/ZINTER with AGGREGATE MIN - $encoding" { - assert_equal {a 1 b 1 c 2 d 3} [r zunion 2 zseta{t} zsetb{t} aggregate min withscores] - assert_equal {b 1 c 2} [r zinter 2 zseta{t} zsetb{t} aggregate min withscores] - } - - test "ZUNIONSTORE with AGGREGATE MAX - $encoding" { - assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} aggregate max] - assert_equal {a 1 b 2 c 3 d 3} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZUNION/ZINTER with AGGREGATE MAX - $encoding" { - assert_equal {a 1 b 2 c 3 d 3} [r zunion 2 zseta{t} zsetb{t} aggregate max withscores] - assert_equal {b 2 c 3} [r zinter 2 zseta{t} zsetb{t} aggregate max withscores] - } - - test "ZINTERSTORE basics - $encoding" { - assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t}] - 
assert_equal {b 3 c 5} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZINTER basics - $encoding" { - assert_equal {b 3 c 5} [r zinter 2 zseta{t} zsetb{t} withscores] - } - - test "ZINTERCARD with illegal arguments" { - assert_error "ERR syntax error*" {r zintercard 1 zseta{t} zseta{t}} - assert_error "ERR syntax error*" {r zintercard 1 zseta{t} bar_arg} - assert_error "ERR syntax error*" {r zintercard 1 zseta{t} LIMIT} - - assert_error "ERR LIMIT*" {r zintercard 1 myset{t} LIMIT -1} - assert_error "ERR LIMIT*" {r zintercard 1 myset{t} LIMIT a} - } - - test "ZINTERCARD basics - $encoding" { - assert_equal 2 [r zintercard 2 zseta{t} zsetb{t}] - assert_equal 2 [r zintercard 2 zseta{t} zsetb{t} limit 0] - assert_equal 1 [r zintercard 2 zseta{t} zsetb{t} limit 1] - assert_equal 2 [r zintercard 2 zseta{t} zsetb{t} limit 10] - } - - test "ZINTER RESP3 - $encoding" { - r hello 3 - assert_equal {{b 3.0} {c 5.0}} [r zinter 2 zseta{t} zsetb{t} withscores] - r hello 2 - } - - test "ZINTERSTORE with weights - $encoding" { - assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} weights 2 3] - assert_equal {b 7 c 12} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZINTER with weights - $encoding" { - assert_equal {b 7 c 12} [r zinter 2 zseta{t} zsetb{t} weights 2 3 withscores] - } - - test "ZINTERSTORE with a regular set and weights - $encoding" { - r del seta{t} - r sadd seta{t} a - r sadd seta{t} b - r sadd seta{t} c - assert_equal 2 [r zinterstore zsetc{t} 2 seta{t} zsetb{t} weights 2 3] - assert_equal {b 5 c 8} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZINTERSTORE with AGGREGATE MIN - $encoding" { - assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} aggregate min] - assert_equal {b 1 c 2} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZINTERSTORE with AGGREGATE MAX - $encoding" { - assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} aggregate max] - assert_equal {b 2 c 3} [r zrange zsetc{t} 0 -1 withscores] - } - - foreach cmd {ZUNIONSTORE ZINTERSTORE} { - test "$cmd with +inf/-inf scores - $encoding" { - r del zsetinf1{t} zsetinf2{t} - - r zadd zsetinf1{t} +inf key - r zadd zsetinf2{t} +inf key - r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} - assert_equal inf [r zscore zsetinf3{t} key] - - r zadd zsetinf1{t} -inf key - r zadd zsetinf2{t} +inf key - r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} - assert_equal 0 [r zscore zsetinf3{t} key] - - r zadd zsetinf1{t} +inf key - r zadd zsetinf2{t} -inf key - r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} - assert_equal 0 [r zscore zsetinf3{t} key] - - r zadd zsetinf1{t} -inf key - r zadd zsetinf2{t} -inf key - r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} - assert_equal -inf [r zscore zsetinf3{t} key] - } - - test "$cmd with NaN weights - $encoding" { - r del zsetinf1{t} zsetinf2{t} - - r zadd zsetinf1{t} 1.0 key - r zadd zsetinf2{t} 1.0 key - assert_error "*weight*not*float*" { - r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} weights nan nan - } - } - } - - test "ZDIFFSTORE basics - $encoding" { - assert_equal 1 [r zdiffstore zsetc{t} 2 zseta{t} zsetb{t}] - assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZDIFF basics - $encoding" { - assert_equal {a 1} [r zdiff 2 zseta{t} zsetb{t} withscores] - } - - test "ZDIFFSTORE with a regular set - $encoding" { - r del seta{t} - r sadd seta{t} a - r sadd seta{t} b - r sadd seta{t} c - assert_equal 1 [r zdiffstore zsetc{t} 2 seta{t} zsetb{t}] - assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZDIFF subtracting set from itself - $encoding" { - 
assert_equal 0 [r zdiffstore zsetc{t} 2 zseta{t} zseta{t}] - assert_equal {} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZDIFF algorithm 1 - $encoding" { - r del zseta{t} zsetb{t} zsetc{t} - r zadd zseta{t} 1 a - r zadd zseta{t} 2 b - r zadd zseta{t} 3 c - r zadd zsetb{t} 1 b - r zadd zsetb{t} 2 c - r zadd zsetb{t} 3 d - assert_equal 1 [r zdiffstore zsetc{t} 2 zseta{t} zsetb{t}] - assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores] - } - - test "ZDIFF algorithm 2 - $encoding" { - r del zseta{t} zsetb{t} zsetc{t} zsetd{t} zsete{t} - r zadd zseta{t} 1 a - r zadd zseta{t} 2 b - r zadd zseta{t} 3 c - r zadd zseta{t} 5 e - r zadd zsetb{t} 1 b - r zadd zsetc{t} 1 c - r zadd zsetd{t} 1 d - assert_equal 2 [r zdiffstore zsete{t} 4 zseta{t} zsetb{t} zsetc{t} zsetd{t}] - assert_equal {a 1 e 5} [r zrange zsete{t} 0 -1 withscores] - } - - test "ZDIFF fuzzing - $encoding" { - for {set j 0} {$j < 100} {incr j} { - unset -nocomplain s - array set s {} - set args {} - set num_sets [expr {[randomInt 10]+1}] - for {set i 0} {$i < $num_sets} {incr i} { - set num_elements [randomInt 100] - r del zset_$i{t} - lappend args zset_$i{t} - while {$num_elements} { - set ele [randomValue] - r zadd zset_$i{t} [randomInt 100] $ele - if {$i == 0} { - set s($ele) x - } else { - unset -nocomplain s($ele) - } - incr num_elements -1 - } - } - set result [lsort [r zdiff [llength $args] {*}$args]] - assert_equal $result [lsort [array names s]] - } - } - - foreach {pop} {ZPOPMIN ZPOPMAX} { - test "$pop with the count 0 returns an empty array" { - r del zset - r zadd zset 1 a 2 b 3 c - assert_equal {} [r $pop zset 0] - - # Make sure we can distinguish between an empty array and a null response - r readraw 1 - assert_equal {*0} [r $pop zset 0] - r readraw 0 - - assert_equal 3 [r zcard zset] - } - - test "$pop with negative count" { - r set zset foo - assert_error "ERR *must be positive" {r $pop zset -1} - - r del zset - assert_error "ERR *must be positive" {r $pop zset -2} - - r zadd zset 1 a 2 b 3 c - assert_error "ERR *must be positive" {r $pop zset -3} - } - } - - foreach {popmin popmax} {ZPOPMIN ZPOPMAX ZMPOP_MIN ZMPOP_MAX} { - test "Basic $popmin/$popmax with a single key - $encoding" { - r del zset - verify_zpop_response r $popmin zset 0 {} {} - - create_zset zset {-1 a 1 b 2 c 3 d 4 e} - verify_zpop_response r $popmin zset 0 {a -1} {zset {{a -1}}} - verify_zpop_response r $popmin zset 0 {b 1} {zset {{b 1}}} - verify_zpop_response r $popmax zset 0 {e 4} {zset {{e 4}}} - verify_zpop_response r $popmax zset 0 {d 3} {zset {{d 3}}} - verify_zpop_response r $popmin zset 0 {c 2} {zset {{c 2}}} - assert_equal 0 [r exists zset] - } - - test "$popmin/$popmax with count - $encoding" { - r del z1 - verify_zpop_response r $popmin z1 2 {} {} - - create_zset z1 {0 a 1 b 2 c 3 d} - verify_zpop_response r $popmin z1 2 {a 0 b 1} {z1 {{a 0} {b 1}}} - verify_zpop_response r $popmax z1 2 {d 3 c 2} {z1 {{d 3} {c 2}}} - } - } - - foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} { - test "$popmin/$popmax with a single existing sorted set - $encoding" { - set rd [redis_deferring_client] - create_zset zset {0 a 1 b 2 c 3 d} - - verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}} - verify_bzpop_response $rd $popmax zset 5 0 {zset d 3} {zset {{d 3}}} - verify_bzpop_response $rd $popmin zset 5 0 {zset b 1} {zset {{b 1}}} - verify_bzpop_response $rd $popmax zset 5 0 {zset c 2} {zset {{c 2}}} - assert_equal 0 [r exists zset] - $rd close - } - - test "$popmin/$popmax with multiple existing sorted sets - 
$encoding" { - set rd [redis_deferring_client] - create_zset z1{t} {0 a 1 b 2 c} - create_zset z2{t} {3 d 4 e 5 f} - - verify_bzpop_two_key_response $rd $popmin z1{t} z2{t} 5 0 {z1{t} a 0} {z1{t} {{a 0}}} - verify_bzpop_two_key_response $rd $popmax z1{t} z2{t} 5 0 {z1{t} c 2} {z1{t} {{c 2}}} - assert_equal 1 [r zcard z1{t}] - assert_equal 3 [r zcard z2{t}] - - verify_bzpop_two_key_response $rd $popmax z2{t} z1{t} 5 0 {z2{t} f 5} {z2{t} {{f 5}}} - verify_bzpop_two_key_response $rd $popmin z2{t} z1{t} 5 0 {z2{t} d 3} {z2{t} {{d 3}}} - assert_equal 1 [r zcard z1{t}] - assert_equal 1 [r zcard z2{t}] - $rd close - } - - test "$popmin/$popmax second sorted set has members - $encoding" { - set rd [redis_deferring_client] - r del z1{t} - create_zset z2{t} {3 d 4 e 5 f} - - verify_bzpop_two_key_response $rd $popmax z1{t} z2{t} 5 0 {z2{t} f 5} {z2{t} {{f 5}}} - verify_bzpop_two_key_response $rd $popmin z1{t} z2{t} 5 0 {z2{t} d 3} {z2{t} {{d 3}}} - assert_equal 0 [r zcard z1{t}] - assert_equal 1 [r zcard z2{t}] - $rd close - } - } - - foreach {popmin popmax} {ZPOPMIN ZPOPMAX ZMPOP_MIN ZMPOP_MAX} { - test "Basic $popmin/$popmax - $encoding RESP3" { - r hello 3 - create_zset z1 {0 a 1 b 2 c 3 d} - verify_zpop_response r $popmin z1 0 {a 0.0} {z1 {{a 0.0}}} - verify_zpop_response r $popmax z1 0 {d 3.0} {z1 {{d 3.0}}} - r hello 2 - } - - test "$popmin/$popmax with count - $encoding RESP3" { - r hello 3 - create_zset z1 {0 a 1 b 2 c 3 d} - verify_zpop_response r $popmin z1 2 {{a 0.0} {b 1.0}} {z1 {{a 0.0} {b 1.0}}} - verify_zpop_response r $popmax z1 2 {{d 3.0} {c 2.0}} {z1 {{d 3.0} {c 2.0}}} - r hello 2 - } - } - - foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} { - test "$popmin/$popmax - $encoding RESP3" { - r hello 3 - set rd [redis_deferring_client] - create_zset zset {0 a 1 b 2 c 3 d} - - verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}} - verify_bzpop_response $rd $popmax zset 5 0 {zset d 3} {zset {{d 3}}} - verify_bzpop_response $rd $popmin zset 5 0 {zset b 1} {zset {{b 1}}} - verify_bzpop_response $rd $popmax zset 5 0 {zset c 2} {zset {{c 2}}} - - assert_equal 0 [r exists zset] - r hello 2 - $rd close - } - } - - r config set zset-max-ziplist-entries $original_max_entries - r config set zset-max-ziplist-value $original_max_value - } - - basics listpack - basics skiplist - - test "ZPOP/ZMPOP against wrong type" { - r set foo{t} bar - assert_error "*WRONGTYPE*" {r zpopmin foo{t}} - assert_error "*WRONGTYPE*" {r zpopmin foo{t} 0} - assert_error "*WRONGTYPE*" {r zpopmax foo{t}} - assert_error "*WRONGTYPE*" {r zpopmax foo{t} 0} - assert_error "*WRONGTYPE*" {r zpopmin foo{t} 2} - - assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} min} - assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} max} - assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} max count 200} - - r del foo{t} - r set foo2{t} bar - assert_error "*WRONGTYPE*" {r zmpop 2 foo{t} foo2{t} min} - assert_error "*WRONGTYPE*" {r zmpop 2 foo2{t} foo1{t} max count 1} - } - - test "ZMPOP with illegal argument" { - assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop} - assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop 1} - assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop 1 myzset{t}} - - assert_error "ERR numkeys*" {r zmpop 0 myzset{t} MIN} - assert_error "ERR numkeys*" {r zmpop a myzset{t} MIN} - assert_error "ERR numkeys*" {r zmpop -1 myzset{t} MAX} - - assert_error "ERR syntax error*" {r zmpop 1 myzset{t} bad_where} - assert_error "ERR syntax error*" {r 
zmpop 1 myzset{t} MIN bar_arg} - assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MAX MIN} - assert_error "ERR syntax error*" {r zmpop 1 myzset{t} COUNT} - assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MAX COUNT 1 COUNT 2} - assert_error "ERR syntax error*" {r zmpop 2 myzset{t} myzset2{t} bad_arg} - - assert_error "ERR count*" {r zmpop 1 myzset{t} MIN COUNT 0} - assert_error "ERR count*" {r zmpop 1 myzset{t} MAX COUNT a} - assert_error "ERR count*" {r zmpop 1 myzset{t} MIN COUNT -1} - assert_error "ERR count*" {r zmpop 2 myzset{t} myzset2{t} MAX COUNT -1} - } - - test "ZMPOP propagate as pop with count command to replica" { - set repl [attach_to_replication_stream] - - # ZMPOP min/max propagate as ZPOPMIN/ZPOPMAX with count - r zadd myzset{t} 1 one 2 two 3 three - - # Pop elements from one zset. - r zmpop 1 myzset{t} min - r zmpop 1 myzset{t} max count 1 - - # Now the zset have only one element - r zmpop 2 myzset{t} myzset2{t} min count 10 - - # No elements so we don't propagate. - r zmpop 2 myzset{t} myzset2{t} max count 10 - - # Pop elements from the second zset. - r zadd myzset2{t} 1 one 2 two 3 three - r zmpop 2 myzset{t} myzset2{t} min count 2 - r zmpop 2 myzset{t} myzset2{t} max count 1 - - # Pop all elements. - r zadd myzset{t} 1 one 2 two 3 three - r zadd myzset2{t} 4 four 5 five 6 six - r zmpop 2 myzset{t} myzset2{t} min count 10 - r zmpop 2 myzset{t} myzset2{t} max count 10 - - assert_replication_stream $repl { - {select *} - {zadd myzset{t} 1 one 2 two 3 three} - {zpopmin myzset{t} 1} - {zpopmax myzset{t} 1} - {zpopmin myzset{t} 1} - {zadd myzset2{t} 1 one 2 two 3 three} - {zpopmin myzset2{t} 2} - {zpopmax myzset2{t} 1} - {zadd myzset{t} 1 one 2 two 3 three} - {zadd myzset2{t} 4 four 5 five 6 six} - {zpopmin myzset{t} 3} - {zpopmax myzset2{t} 3} - } - close_replication_stream $repl - } {} {needs:repl} - - foreach resp {3 2} { - set rd [redis_deferring_client] - - if {[lsearch $::denytags "resp3"] >= 0} { - if {$resp == 3} {continue} - } elseif {$::force_resp3} { - if {$resp == 2} {continue} - } - r hello $resp - $rd hello $resp - $rd read - - test "ZPOPMIN/ZPOPMAX readraw in RESP$resp" { - r del zset{t} - create_zset zset2{t} {1 a 2 b 3 c 4 d 5 e} - - r readraw 1 - - # ZPOP against non existing key. - assert_equal {*0} [r zpopmin zset{t}] - assert_equal {*0} [r zpopmin zset{t} 1] - - # ZPOP without COUNT option. - assert_equal {*2} [r zpopmin zset2{t}] - assert_equal [r read] {$1} - assert_equal [r read] {a} - verify_score_response r $resp 1 - - # ZPOP with COUNT option. - if {$resp == 2} { - assert_equal {*2} [r zpopmax zset2{t} 1] - assert_equal [r read] {$1} - assert_equal [r read] {e} - } elseif {$resp == 3} { - assert_equal {*1} [r zpopmax zset2{t} 1] - assert_equal [r read] {*2} - assert_equal [r read] {$1} - assert_equal [r read] {e} - } - verify_score_response r $resp 5 - - r readraw 0 - } - - test "BZPOPMIN/BZPOPMAX readraw in RESP$resp" { - r del zset{t} - create_zset zset2{t} {1 a 2 b 3 c 4 d 5 e} - - $rd readraw 1 - - # BZPOP released on timeout. - $rd bzpopmin zset{t} 0.01 - verify_nil_response $resp [$rd read] - $rd bzpopmax zset{t} 0.01 - verify_nil_response $resp [$rd read] - - # BZPOP non-blocking path. - $rd bzpopmin zset1{t} zset2{t} 0.1 - assert_equal [$rd read] {*3} - assert_equal [$rd read] {$8} - assert_equal [$rd read] {zset2{t}} - assert_equal [$rd read] {$1} - assert_equal [$rd read] {a} - verify_score_response $rd $resp 1 - - # BZPOP blocking path. 
- $rd bzpopmin zset{t} 5 - wait_for_blocked_client - r zadd zset{t} 1 a - assert_equal [$rd read] {*3} - assert_equal [$rd read] {$7} - assert_equal [$rd read] {zset{t}} - assert_equal [$rd read] {$1} - assert_equal [$rd read] {a} - verify_score_response $rd $resp 1 - - $rd readraw 0 - } - - test "ZMPOP readraw in RESP$resp" { - r del zset{t} zset2{t} - create_zset zset3{t} {1 a} - create_zset zset4{t} {1 a 2 b 3 c 4 d 5 e} - - r readraw 1 - - # ZMPOP against non existing key. - verify_nil_response $resp [r zmpop 1 zset{t} min] - verify_nil_response $resp [r zmpop 1 zset{t} max count 1] - verify_nil_response $resp [r zmpop 2 zset{t} zset2{t} min] - verify_nil_response $resp [r zmpop 2 zset{t} zset2{t} max count 1] - - # ZMPOP with one input key. - assert_equal {*2} [r zmpop 1 zset3{t} max] - assert_equal [r read] {$8} - assert_equal [r read] {zset3{t}} - assert_equal [r read] {*1} - assert_equal [r read] {*2} - assert_equal [r read] {$1} - assert_equal [r read] {a} - verify_score_response r $resp 1 - - # ZMPOP with COUNT option. - assert_equal {*2} [r zmpop 2 zset3{t} zset4{t} min count 2] - assert_equal [r read] {$8} - assert_equal [r read] {zset4{t}} - assert_equal [r read] {*2} - assert_equal [r read] {*2} - assert_equal [r read] {$1} - assert_equal [r read] {a} - verify_score_response r $resp 1 - assert_equal [r read] {*2} - assert_equal [r read] {$1} - assert_equal [r read] {b} - verify_score_response r $resp 2 - - r readraw 0 - } - - test "BZMPOP readraw in RESP$resp" { - r del zset{t} zset2{t} - create_zset zset3{t} {1 a 2 b 3 c 4 d 5 e} - - $rd readraw 1 - - # BZMPOP released on timeout. - $rd bzmpop 0.01 1 zset{t} min - verify_nil_response $resp [$rd read] - $rd bzmpop 0.01 2 zset{t} zset2{t} max - verify_nil_response $resp [$rd read] - - # BZMPOP non-blocking path. - $rd bzmpop 0.1 2 zset3{t} zset4{t} min - - assert_equal [$rd read] {*2} - assert_equal [$rd read] {$8} - assert_equal [$rd read] {zset3{t}} - assert_equal [$rd read] {*1} - assert_equal [$rd read] {*2} - assert_equal [$rd read] {$1} - assert_equal [$rd read] {a} - verify_score_response $rd $resp 1 - - # BZMPOP blocking path with COUNT option. - $rd bzmpop 5 2 zset{t} zset2{t} max count 2 - wait_for_blocked_client - r zadd zset2{t} 1 a 2 b 3 c - - assert_equal [$rd read] {*2} - assert_equal [$rd read] {$8} - assert_equal [$rd read] {zset2{t}} - assert_equal [$rd read] {*2} - assert_equal [$rd read] {*2} - assert_equal [$rd read] {$1} - assert_equal [$rd read] {c} - verify_score_response $rd $resp 3 - assert_equal [$rd read] {*2} - assert_equal [$rd read] {$1} - assert_equal [$rd read] {b} - verify_score_response $rd $resp 2 - - } - - $rd close - r hello 2 - } - - test {ZINTERSTORE regression with two sets, intset+hashtable} { - r del seta{t} setb{t} setc{t} - r sadd set1{t} a - r sadd set2{t} 10 - r zinterstore set3{t} 2 set1{t} set2{t} - } {0} - - test {ZUNIONSTORE regression, should not create NaN in scores} { - r zadd z{t} -inf neginf - r zunionstore out{t} 1 z{t} weights 0 - r zrange out{t} 0 -1 withscores - } {neginf 0} - - test {ZINTERSTORE #516 regression, mixed sets and ziplist zsets} { - r sadd one{t} 100 101 102 103 - r sadd two{t} 100 200 201 202 - r zadd three{t} 1 500 1 501 1 502 1 503 1 100 - r zinterstore to_here{t} 3 one{t} two{t} three{t} WEIGHTS 0 0 1 - r zrange to_here{t} 0 -1 - } {100} - - test {ZUNIONSTORE result is sorted} { - # Create two sets with common and not common elements, perform - # the UNION, check that elements are still sorted. 
- r del one{t} two{t} dest{t} - set cmd1 [list r zadd one{t}] - set cmd2 [list r zadd two{t}] - for {set j 0} {$j < 1000} {incr j} { - lappend cmd1 [expr rand()] [randomInt 1000] - lappend cmd2 [expr rand()] [randomInt 1000] - } - {*}$cmd1 - {*}$cmd2 - assert {[r zcard one{t}] > 100} - assert {[r zcard two{t}] > 100} - r zunionstore dest{t} 2 one{t} two{t} - set oldscore 0 - foreach {ele score} [r zrange dest{t} 0 -1 withscores] { - assert {$score >= $oldscore} - set oldscore $score - } - } - - test "ZUNIONSTORE/ZINTERSTORE/ZDIFFSTORE error if using WITHSCORES " { - assert_error "*ERR*syntax*" {r zunionstore foo{t} 2 zsetd{t} zsetf{t} withscores} - assert_error "*ERR*syntax*" {r zinterstore foo{t} 2 zsetd{t} zsetf{t} withscores} - assert_error "*ERR*syntax*" {r zdiffstore foo{t} 2 zsetd{t} zsetf{t} withscores} - } - - test {ZMSCORE retrieve} { - r del zmscoretest - r zadd zmscoretest 10 x - r zadd zmscoretest 20 y - - r zmscore zmscoretest x y - } {10 20} - - test {ZMSCORE retrieve from empty set} { - r del zmscoretest - - r zmscore zmscoretest x y - } {{} {}} - - test {ZMSCORE retrieve with missing member} { - r del zmscoretest - r zadd zmscoretest 10 x - - r zmscore zmscoretest x y - } {10 {}} - - test {ZMSCORE retrieve single member} { - r del zmscoretest - r zadd zmscoretest 10 x - r zadd zmscoretest 20 y - - r zmscore zmscoretest x - } {10} - - test {ZMSCORE retrieve requires one or more members} { - r del zmscoretest - r zadd zmscoretest 10 x - r zadd zmscoretest 20 y - - catch {r zmscore zmscoretest} e - assert_match {*ERR*wrong*number*arg*} $e - } - - test "ZSET commands don't accept the empty strings as valid score" { - assert_error "*not*float*" {r zadd myzset "" abc} - } - - test "zunionInterDiffGenericCommand at least 1 input key" { - assert_error {*at least 1 input key * 'zunion' command} {r zunion 0 key{t}} - assert_error {*at least 1 input key * 'zunionstore' command} {r zunionstore dst_key{t} 0 key{t}} - assert_error {*at least 1 input key * 'zinter' command} {r zinter 0 key{t}} - assert_error {*at least 1 input key * 'zinterstore' command} {r zinterstore dst_key{t} 0 key{t}} - assert_error {*at least 1 input key * 'zdiff' command} {r zdiff 0 key{t}} - assert_error {*at least 1 input key * 'zdiffstore' command} {r zdiffstore dst_key{t} 0 key{t}} - assert_error {*at least 1 input key * 'zintercard' command} {r zintercard 0 key{t}} - } - - proc stressers {encoding} { - set original_max_entries [lindex [r config get zset-max-ziplist-entries] 1] - set original_max_value [lindex [r config get zset-max-ziplist-value] 1] - if {$encoding == "listpack"} { - # Little extra to allow proper fuzzing in the sorting stresser - r config set zset-max-ziplist-entries 256 - r config set zset-max-ziplist-value 64 - set elements 128 - } elseif {$encoding == "skiplist"} { - r config set zset-max-ziplist-entries 0 - r config set zset-max-ziplist-value 0 - if {$::accurate} {set elements 1000} else {set elements 100} - } else { - puts "Unknown sorted set encoding" - exit - } - - test "ZSCORE - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - # If an IEEE 754 double-precision number is converted to a decimal string with at - # least 17 significant digits (reply of zscore), and then converted back to double-precision representation, - # the final result replied via zscore command must match the 
original number present on the $aux list. - # Given Tcl is mostly very relaxed about types (everything is a string) we need to use expr to convert a string to float. - assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]] - } - } - - test "ZMSCORE - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - # Check above notes on IEEE 754 double-precision comparison - assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]] - } - } - - test "ZSCORE after a DEBUG RELOAD - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - r debug reload - assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - # Check above notes on IEEE 754 double-precision comparison - assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]] - } - } {} {needs:debug} - - test "ZSET sorting stresser - $encoding" { - set delta 0 - for {set test 0} {$test < 2} {incr test} { - unset -nocomplain auxarray - array set auxarray {} - set auxlist {} - r del myzset - for {set i 0} {$i < $elements} {incr i} { - if {$test == 0} { - set score [expr rand()] - } else { - set score [expr int(rand()*10)] - } - set auxarray($i) $score - r zadd myzset $score $i - # Random update - if {[expr rand()] < .2} { - set j [expr int(rand()*1000)] - if {$test == 0} { - set score [expr rand()] - } else { - set score [expr int(rand()*10)] - } - set auxarray($j) $score - r zadd myzset $score $j - } - } - foreach {item score} [array get auxarray] { - lappend auxlist [list $score $item] - } - set sorted [lsort -command zlistAlikeSort $auxlist] - set auxlist {} - foreach x $sorted { - lappend auxlist [lindex $x 1] - } - - assert_encoding $encoding myzset - set fromredis [r zrange myzset 0 -1] - set delta 0 - for {set i 0} {$i < [llength $fromredis]} {incr i} { - if {[lindex $fromredis $i] != [lindex $auxlist $i]} { - incr delta - } - } - } - assert_equal 0 $delta - } - - test "ZRANGEBYSCORE fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set err {} - r del zset - for {set i 0} {$i < $elements} {incr i} { - r zadd zset [expr rand()] $i - } - - assert_encoding $encoding zset - for {set i 0} {$i < 100} {incr i} { - set min [expr rand()] - set max [expr rand()] - if {$min > $max} { - set aux $min - set min $max - set max $aux - } - set low [r zrangebyscore zset -inf $min] - set ok [r zrangebyscore zset $min $max] - set high [r zrangebyscore zset $max +inf] - set lowx [r zrangebyscore zset -inf ($min] - set okx [r zrangebyscore zset ($min ($max] - set highx [r zrangebyscore zset ($max +inf] - - if {[r zcount zset -inf $min] != [llength $low]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset $min $max] != [llength $ok]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset $max +inf] != [llength $high]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset -inf ($min] != [llength $lowx]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset ($min ($max] != [llength $okx]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset ($max +inf] != [llength $highx]} { - append err "Error, len does not match zcount\n" - } - - foreach x $low { - set 
score [r zscore zset $x] - if {$score > $min} { - append err "Error, score for $x is $score > $min\n" - } - } - foreach x $lowx { - set score [r zscore zset $x] - if {$score >= $min} { - append err "Error, score for $x is $score >= $min\n" - } - } - foreach x $ok { - set score [r zscore zset $x] - if {$score < $min || $score > $max} { - append err "Error, score for $x is $score outside $min-$max range\n" - } - } - foreach x $okx { - set score [r zscore zset $x] - if {$score <= $min || $score >= $max} { - append err "Error, score for $x is $score outside $min-$max open range\n" - } - } - foreach x $high { - set score [r zscore zset $x] - if {$score < $max} { - append err "Error, score for $x is $score < $max\n" - } - } - foreach x $highx { - set score [r zscore zset $x] - if {$score <= $max} { - append err "Error, score for $x is $score <= $max\n" - } - } - } - assert_equal {} $err - } - - test "ZRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set lexset {} - r del zset - for {set j 0} {$j < $elements} {incr j} { - set e [randstring 0 30 alpha] - lappend lexset $e - r zadd zset 0 $e - } - set lexset [lsort -unique $lexset] - for {set j 0} {$j < 100} {incr j} { - set min [randstring 0 30 alpha] - set max [randstring 0 30 alpha] - set mininc [randomInt 2] - set maxinc [randomInt 2] - if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} - if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} - set rev [randomInt 2] - if {$rev} { - set cmd zrevrangebylex - } else { - set cmd zrangebylex - } - - # Make sure data is the same in both sides - assert {[r zrange zset 0 -1] eq $lexset} - - # Get the Redis output - set output [r $cmd zset $cmin $cmax] - if {$rev} { - set outlen [r zlexcount zset $cmax $cmin] - } else { - set outlen [r zlexcount zset $cmin $cmax] - } - - # Compute the same output via Tcl - set o {} - set copy $lexset - if {(!$rev && [string compare $min $max] > 0) || - ($rev && [string compare $max $min] > 0)} { - # Empty output when ranges are inverted. - } else { - if {$rev} { - # Invert the Tcl array using Redis itself. - set copy [r zrevrange zset 0 -1] - # Invert min / max as well - lassign [list $min $max $mininc $maxinc] \ - max min maxinc mininc - } - foreach e $copy { - set mincmp [string compare $e $min] - set maxcmp [string compare $e $max] - if { - ($mininc && $mincmp >= 0 || !$mininc && $mincmp > 0) - && - ($maxinc && $maxcmp <= 0 || !$maxinc && $maxcmp < 0) - } { - lappend o $e - } - } - } - assert {$o eq $output} - assert {$outlen eq [llength $output]} - } - } - - test "ZREMRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set lexset {} - r del zset{t} zsetcopy{t} - for {set j 0} {$j < $elements} {incr j} { - set e [randstring 0 30 alpha] - lappend lexset $e - r zadd zset{t} 0 $e - } - set lexset [lsort -unique $lexset] - for {set j 0} {$j < 100} {incr j} { - # Copy... 
- r zunionstore zsetcopy{t} 1 zset{t} - set lexsetcopy $lexset - - set min [randstring 0 30 alpha] - set max [randstring 0 30 alpha] - set mininc [randomInt 2] - set maxinc [randomInt 2] - if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} - if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} - - # Make sure data is the same in both sides - assert {[r zrange zset{t} 0 -1] eq $lexset} - - # Get the range we are going to remove - set torem [r zrangebylex zset{t} $cmin $cmax] - set toremlen [r zlexcount zset{t} $cmin $cmax] - r zremrangebylex zsetcopy{t} $cmin $cmax - set output [r zrange zsetcopy{t} 0 -1] - - # Remove the range with Tcl from the original list - if {$toremlen} { - set first [lsearch -exact $lexsetcopy [lindex $torem 0]] - set last [expr {$first+$toremlen-1}] - set lexsetcopy [lreplace $lexsetcopy $first $last] - } - assert {$lexsetcopy eq $output} - } - } - - test "ZSETs skiplist implementation backlink consistency test - $encoding" { - set diff 0 - for {set j 0} {$j < $elements} {incr j} { - r zadd myzset [expr rand()] "Element-$j" - r zrem myzset "Element-[expr int(rand()*$elements)]" - } - - assert_encoding $encoding myzset - set l1 [r zrange myzset 0 -1] - set l2 [r zrevrange myzset 0 -1] - for {set j 0} {$j < [llength $l1]} {incr j} { - if {[lindex $l1 $j] ne [lindex $l2 end-$j]} { - incr diff - } - } - assert_equal 0 $diff - } - - test "ZSETs ZRANK augmented skip list stress testing - $encoding" { - set err {} - r del myzset - for {set k 0} {$k < 2000} {incr k} { - set i [expr {$k % $elements}] - if {[expr rand()] < .2} { - r zrem myzset $i - } else { - set score [expr rand()] - r zadd myzset $score $i - assert_encoding $encoding myzset - } - - set card [r zcard myzset] - if {$card > 0} { - set index [randomInt $card] - set ele [lindex [r zrange myzset $index $index] 0] - set rank [r zrank myzset $ele] - if {$rank != $index} { - set err "$ele RANK is wrong! ($rank != $index)" - break - } - } - } - assert_equal {} $err - } - - foreach {pop} {BZPOPMIN BZMPOP_MIN} { - test "$pop, ZADD + DEL should not awake blocked client" { - set rd [redis_deferring_client] - r del zset - - bzpop_command $rd $pop zset 0 - wait_for_blocked_client - - r multi - r zadd zset 0 foo - r del zset - r exec - r del zset - r zadd zset 1 bar - - verify_pop_response $pop [$rd read] {zset bar 1} {zset {{bar 1}}} - $rd close - } - - test "$pop, ZADD + DEL + SET should not awake blocked client" { - set rd [redis_deferring_client] - r del zset - - bzpop_command $rd $pop zset 0 - wait_for_blocked_client - - r multi - r zadd zset 0 foo - r del zset - r set zset foo - r exec - r del zset - r zadd zset 1 bar - - verify_pop_response $pop [$rd read] {zset bar 1} {zset {{bar 1}}} - $rd close - } - } - - test {BZPOPMIN unblock but the key is expired and then block again - reprocessing command} { - r flushall - r debug set-active-expire 0 - set rd [redis_deferring_client] - - set start [clock milliseconds] - $rd bzpopmin zset{t} 1 - wait_for_blocked_clients_count 1 - - # The exec will try to awake the blocked client, but the key is expired, - # so the client will be blocked again during the command reprocessing. - r multi - r zadd zset{t} 1 one - r pexpire zset{t} 100 - r debug sleep 0.2 - r exec - - assert_equal {} [$rd read] - set end [clock milliseconds] - - # Before the fix in #13004, this time would have been 1200+ (i.e. more than 1200ms), - # now it should be 1000, but in order to avoid timing issues, we increase the range a bit. 
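# assert_range is provided by the removed Tcl harness; a minimal plain-Tcl
# sketch of the same bound check used just below (the proc name is
# hypothetical, not from the suite):
proc assert_range_sketch {value min max} {
    if {$value < $min || $value > $max} {
        error "expected $value to be within \[$min .. $max]"
    }
}
# e.g. assert_range_sketch [expr {$end - $start}] 1000 1150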
- assert_range [expr $end-$start] 1000 1150 - - r debug set-active-expire 1 - $rd close - } {0} {needs:debug} - - test "BZPOPMIN with same key multiple times should work" { - set rd [redis_deferring_client] - r del z1{t} z2{t} - - # Data arriving after the BZPOPMIN. - $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 - wait_for_blocked_client - r zadd z1{t} 0 a - assert_equal [$rd read] {z1{t} a 0} - $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 - wait_for_blocked_client - r zadd z2{t} 1 b - assert_equal [$rd read] {z2{t} b 1} - - # Data already there. - r zadd z1{t} 0 a - r zadd z2{t} 1 b - $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 - assert_equal [$rd read] {z1{t} a 0} - $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0 - assert_equal [$rd read] {z2{t} b 1} - $rd close - } - - foreach {pop} {BZPOPMIN BZMPOP_MIN} { - test "MULTI/EXEC is isolated from the point of view of $pop" { - set rd [redis_deferring_client] - r del zset - - bzpop_command $rd $pop zset 0 - wait_for_blocked_client - - r multi - r zadd zset 0 a - r zadd zset 1 b - r zadd zset 2 c - r exec - - verify_pop_response $pop [$rd read] {zset a 0} {zset {{a 0}}} - $rd close - } - - test "$pop with variadic ZADD" { - set rd [redis_deferring_client] - r del zset - if {$::valgrind} {after 100} - bzpop_command $rd $pop zset 0 - wait_for_blocked_client - if {$::valgrind} {after 100} - assert_equal 2 [r zadd zset -1 foo 1 bar] - if {$::valgrind} {after 100} - verify_pop_response $pop [$rd read] {zset foo -1} {zset {{foo -1}}} - assert_equal {bar} [r zrange zset 0 -1] - $rd close - } - - test "$pop with zero timeout should block indefinitely" { - set rd [redis_deferring_client] - r del zset - bzpop_command $rd $pop zset 0 - wait_for_blocked_client - after 1000 - r zadd zset 0 foo - verify_pop_response $pop [$rd read] {zset foo 0} {zset {{foo 0}}} - $rd close - } - } - - r config set zset-max-ziplist-entries $original_max_entries - r config set zset-max-ziplist-value $original_max_value - } - - tags {"slow"} { - stressers listpack - stressers skiplist - } - - test "BZPOP/BZMPOP against wrong type" { - r set foo{t} bar - assert_error "*WRONGTYPE*" {r bzpopmin foo{t} 1} - assert_error "*WRONGTYPE*" {r bzpopmax foo{t} 1} - - assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} min} - assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} max} - assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} min count 10} - - r del foo{t} - r set foo2{t} bar - assert_error "*WRONGTYPE*" {r bzmpop 1 2 foo{t} foo2{t} min} - assert_error "*WRONGTYPE*" {r bzmpop 1 2 foo2{t} foo{t} max count 1} - } - - test "BZMPOP with illegal argument" { - assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop} - assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop 0 1} - assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop 0 1 myzset{t}} - - assert_error "ERR numkeys*" {r bzmpop 1 0 myzset{t} MIN} - assert_error "ERR numkeys*" {r bzmpop 1 a myzset{t} MIN} - assert_error "ERR numkeys*" {r bzmpop 1 -1 myzset{t} MAX} - - assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} bad_where} - assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MIN bar_arg} - assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MAX MIN} - assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} COUNT} - assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MIN COUNT 1 COUNT 2} - assert_error "ERR syntax error*" {r bzmpop 1 2 myzset{t} myzset2{t} bad_arg} - - assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MIN COUNT 0} - assert_error "ERR count*" {r bzmpop 1 1 
myzset{t} MAX COUNT a} - assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MIN COUNT -1} - assert_error "ERR count*" {r bzmpop 1 2 myzset{t} myzset2{t} MAX COUNT -1} - } - - test "BZMPOP with multiple blocked clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] - set rd4 [redis_deferring_client] - r del myzset{t} myzset2{t} - - $rd1 bzmpop 0 2 myzset{t} myzset2{t} min count 1 - wait_for_blocked_clients_count 1 - $rd2 bzmpop 0 2 myzset{t} myzset2{t} max count 10 - wait_for_blocked_clients_count 2 - $rd3 bzmpop 0 2 myzset{t} myzset2{t} min count 10 - wait_for_blocked_clients_count 3 - $rd4 bzmpop 0 2 myzset{t} myzset2{t} max count 1 - wait_for_blocked_clients_count 4 - - r multi - r zadd myzset{t} 1 a 2 b 3 c 4 d 5 e - r zadd myzset2{t} 1 a 2 b 3 c 4 d 5 e - r exec - - assert_equal {myzset{t} {{a 1}}} [$rd1 read] - assert_equal {myzset{t} {{e 5} {d 4} {c 3} {b 2}}} [$rd2 read] - assert_equal {myzset2{t} {{a 1} {b 2} {c 3} {d 4} {e 5}}} [$rd3 read] - - r zadd myzset2{t} 1 a 2 b 3 c - assert_equal {myzset2{t} {{c 3}}} [$rd4 read] - - r del myzset{t} myzset2{t} - $rd1 close - $rd2 close - $rd3 close - $rd4 close - } - - test "BZMPOP propagate as pop with count command to replica" { - set rd [redis_deferring_client] - set repl [attach_to_replication_stream] - - # BZMPOP without being blocked. - r zadd myzset{t} 1 one 2 two 3 three - r zadd myzset2{t} 4 four 5 five 6 six - r bzmpop 0 1 myzset{t} min - r bzmpop 0 2 myzset{t} myzset2{t} max count 10 - r bzmpop 0 2 myzset{t} myzset2{t} max count 10 - - # BZMPOP that gets blocked. - $rd bzmpop 0 1 myzset{t} min count 1 - wait_for_blocked_client - r zadd myzset{t} 1 one - $rd bzmpop 0 2 myzset{t} myzset2{t} min count 5 - wait_for_blocked_client - r zadd myzset{t} 1 one 2 two 3 three - $rd bzmpop 0 2 myzset{t} myzset2{t} max count 10 - wait_for_blocked_client - r zadd myzset2{t} 4 four 5 five 6 six - - # Released on timeout. - assert_equal {} [r bzmpop 0.01 1 myzset{t} max count 10] - r set foo{t} bar ;# something else to propagate after, so we can make sure the above pop didn't. - - $rd close - - assert_replication_stream $repl { - {select *} - {zadd myzset{t} 1 one 2 two 3 three} - {zadd myzset2{t} 4 four 5 five 6 six} - {zpopmin myzset{t} 1} - {zpopmax myzset{t} 2} - {zpopmax myzset2{t} 3} - {zadd myzset{t} 1 one} - {zpopmin myzset{t} 1} - {zadd myzset{t} 1 one 2 two 3 three} - {zpopmin myzset{t} 3} - {zadd myzset2{t} 4 four 5 five 6 six} - {zpopmax myzset2{t} 3} - {set foo{t} bar} - } - close_replication_stream $repl - } {} {needs:repl} - - test "BZMPOP should not blocks on non key arguments - #10762" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - r del myzset myzset2 myzset3 - - $rd1 bzmpop 0 1 myzset min count 10 - wait_for_blocked_clients_count 1 - $rd2 bzmpop 0 2 myzset2 myzset3 max count 10 - wait_for_blocked_clients_count 2 - - # These non-key keys will not unblock the clients. 
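# BZMPOP is parsed as: BZMPOP <timeout> <numkeys> key [key ...] MIN|MAX
# [COUNT n], so only the <numkeys> names after the numeric prefix are keys.
# The ZADDs below write to keys that merely happen to be spelled like the
# timeout/numkeys/option tokens, so they must not wake the blocked clients.
# Illustrative sketch only (the variable name is hypothetical):
set bzmpop_sketch_args [list 0 2 myzset2 myzset3 max count 10]
# timeout -> [lindex $bzmpop_sketch_args 0]     ;# 0
# numkeys -> [lindex $bzmpop_sketch_args 1]     ;# 2
# keys    -> [lrange $bzmpop_sketch_args 2 3]   ;# myzset2 myzset3
# options -> [lrange $bzmpop_sketch_args 4 end] ;# max count 10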
- r zadd 0 100 timeout_value - r zadd 1 200 numkeys_value - r zadd min 300 min_token - r zadd max 400 max_token - r zadd count 500 count_token - r zadd 10 600 count_value - - r zadd myzset 1 zset - r zadd myzset3 1 zset3 - assert_equal {myzset {{zset 1}}} [$rd1 read] - assert_equal {myzset3 {{zset3 1}}} [$rd2 read] - - $rd1 close - $rd2 close - } {0} {cluster:skip} - - test {ZSET skiplist order consistency when elements are moved} { - set original_max [lindex [r config get zset-max-ziplist-entries] 1] - r config set zset-max-ziplist-entries 0 - for {set times 0} {$times < 10} {incr times} { - r del zset - for {set j 0} {$j < 1000} {incr j} { - r zadd zset [randomInt 50] ele-[randomInt 10] - } - - # Make sure that element ordering is correct - set prev_element {} - set prev_score -1 - foreach {element score} [r zrange zset 0 -1 WITHSCORES] { - # Assert that elements are in increasing ordering - assert { - $prev_score < $score || - ($prev_score == $score && - [string compare $prev_element $element] == -1) - } - set prev_element $element - set prev_score $score - } - } - r config set zset-max-ziplist-entries $original_max - } - - test {ZRANGESTORE basic} { - r flushall - r zadd z1{t} 1 a 2 b 3 c 4 d - set res [r zrangestore z2{t} z1{t} 0 -1] - assert_equal $res 4 - r zrange z2{t} 0 -1 withscores - } {a 1 b 2 c 3 d 4} - - test {ZRANGESTORE RESP3} { - r hello 3 - assert_equal [r zrange z2{t} 0 -1 withscores] {{a 1.0} {b 2.0} {c 3.0} {d 4.0}} - r hello 2 - } - - test {ZRANGESTORE range} { - set res [r zrangestore z2{t} z1{t} 1 2] - assert_equal $res 2 - r zrange z2{t} 0 -1 withscores - } {b 2 c 3} - - test {ZRANGESTORE BYLEX} { - set res [r zrangestore z3{t} z1{t} \[b \[c BYLEX] - assert_equal $res 2 - assert_encoding listpack z3{t} - set res [r zrangestore z2{t} z1{t} \[b \[c BYLEX] - assert_equal $res 2 - r zrange z2{t} 0 -1 withscores - } {b 2 c 3} - - test {ZRANGESTORE BYSCORE} { - set res [r zrangestore z4{t} z1{t} 1 2 BYSCORE] - assert_equal $res 2 - assert_encoding listpack z4{t} - set res [r zrangestore z2{t} z1{t} 1 2 BYSCORE] - assert_equal $res 2 - r zrange z2{t} 0 -1 withscores - } {a 1 b 2} - - test {ZRANGESTORE BYSCORE LIMIT} { - set res [r zrangestore z2{t} z1{t} 0 5 BYSCORE LIMIT 0 2] - assert_equal $res 2 - r zrange z2{t} 0 -1 withscores - } {a 1 b 2} - - test {ZRANGESTORE BYSCORE REV LIMIT} { - set res [r zrangestore z2{t} z1{t} 5 0 BYSCORE REV LIMIT 0 2] - assert_equal $res 2 - r zrange z2{t} 0 -1 withscores - } {c 3 d 4} - - test {ZRANGE BYSCORE REV LIMIT} { - r zrange z1{t} 5 0 BYSCORE REV LIMIT 0 2 WITHSCORES - } {d 4 c 3} - - test {ZRANGESTORE - src key missing} { - set res [r zrangestore z2{t} missing{t} 0 -1] - assert_equal $res 0 - r exists z2{t} - } {0} - - test {ZRANGESTORE - src key wrong type} { - r zadd z2{t} 1 a - r set foo{t} bar - assert_error "*WRONGTYPE*" {r zrangestore z2{t} foo{t} 0 -1} - r zrange z2{t} 0 -1 - } {a} - - test {ZRANGESTORE - empty range} { - set res [r zrangestore z2{t} z1{t} 5 6] - assert_equal $res 0 - r exists z2{t} - } {0} - - test {ZRANGESTORE BYLEX - empty range} { - set res [r zrangestore z2{t} z1{t} \[f \[g BYLEX] - assert_equal $res 0 - r exists z2{t} - } {0} - - test {ZRANGESTORE BYSCORE - empty range} { - set res [r zrangestore z2{t} z1{t} 5 6 BYSCORE] - assert_equal $res 0 - r exists z2{t} - } {0} - - test {ZRANGE BYLEX} { - r zrange z1{t} \[b \[c BYLEX - } {b c} - - test {ZRANGESTORE invalid syntax} { - catch {r zrangestore z2{t} z1{t} 0 -1 limit 1 2} err - assert_match "*syntax*" $err - catch {r zrangestore z2{t} z1{t} 0 -1 
WITHSCORES} err - assert_match "*syntax*" $err - } - - test {ZRANGESTORE with zset-max-listpack-entries 0 #10767 case} { - set original_max [lindex [r config get zset-max-listpack-entries] 1] - r config set zset-max-listpack-entries 0 - r del z1{t} z2{t} - r zadd z1{t} 1 a - assert_encoding skiplist z1{t} - assert_equal 1 [r zrangestore z2{t} z1{t} 0 -1] - assert_encoding skiplist z2{t} - r config set zset-max-listpack-entries $original_max - } - - test {ZRANGESTORE with zset-max-listpack-entries 1 dst key should use skiplist encoding} { - set original_max [lindex [r config get zset-max-listpack-entries] 1] - r config set zset-max-listpack-entries 1 - r del z1{t} z2{t} z3{t} - r zadd z1{t} 1 a 2 b - assert_equal 1 [r zrangestore z2{t} z1{t} 0 0] - assert_encoding listpack z2{t} - assert_equal 2 [r zrangestore z3{t} z1{t} 0 1] - assert_encoding skiplist z3{t} - r config set zset-max-listpack-entries $original_max - } - - test {ZRANGE invalid syntax} { - catch {r zrange z1{t} 0 -1 limit 1 2} err - assert_match "*syntax*" $err - catch {r zrange z1{t} 0 -1 BYLEX WITHSCORES} err - assert_match "*syntax*" $err - catch {r zrevrange z1{t} 0 -1 BYSCORE} err - assert_match "*syntax*" $err - catch {r zrangebyscore z1{t} 0 -1 REV} err - assert_match "*syntax*" $err - } - - proc get_keys {l} { - set res {} - foreach {score key} $l { - lappend res $key - } - return $res - } - - # Check whether the zset members belong to the zset - proc check_member {mydict res} { - foreach ele $res { - assert {[dict exists $mydict $ele]} - } - } - - # Check whether the zset members and score belong to the zset - proc check_member_and_score {mydict res} { - foreach {key val} $res { - assert_equal $val [dict get $mydict $key] - } - } - - foreach {type contents} "listpack {1 a 2 b 3 c} skiplist {1 a 2 b 3 [randstring 70 90 alpha]}" { - set original_max_value [lindex [r config get zset-max-ziplist-value] 1] - r config set zset-max-ziplist-value 10 - create_zset myzset $contents - assert_encoding $type myzset - - test "ZRANDMEMBER - $type" { - unset -nocomplain myzset - array set myzset {} - for {set i 0} {$i < 100} {incr i} { - set key [r zrandmember myzset] - set myzset($key) 1 - } - assert_equal [lsort [get_keys $contents]] [lsort [array names myzset]] - } - r config set zset-max-ziplist-value $original_max_value - } - - test "ZRANDMEMBER with RESP3" { - r hello 3 - set res [r zrandmember myzset 3 withscores] - assert_equal [llength $res] 3 - assert_equal [llength [lindex $res 1]] 2 - - set res [r zrandmember myzset 3] - assert_equal [llength $res] 3 - assert_equal [llength [lindex $res 1]] 1 - r hello 2 - } - - test "ZRANDMEMBER count of 0 is handled correctly" { - r zrandmember myzset 0 - } {} - - test "ZRANDMEMBER with against non existing key" { - r zrandmember nonexisting_key 100 - } {} - - test "ZRANDMEMBER count overflow" { - r zadd myzset 0 a - assert_error {*value is out of range*} {r zrandmember myzset -9223372036854770000 withscores} - assert_error {*value is out of range*} {r zrandmember myzset -9223372036854775808 withscores} - assert_error {*value is out of range*} {r zrandmember myzset -9223372036854775808} - } {} - - # Make sure we can distinguish between an empty array and a null response - r readraw 1 - - test "ZRANDMEMBER count of 0 is handled correctly - emptyarray" { - r zrandmember myzset 0 - } {*0} - - test "ZRANDMEMBER with against non existing key - emptyarray" { - r zrandmember nonexisting_key 100 - } {*0} - - r readraw 0 - - foreach {type contents} " - skiplist {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 
i 10 [randstring 70 90 alpha]} - listpack {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 j} " { - test "ZRANDMEMBER with - $type" { - set original_max_value [lindex [r config get zset-max-ziplist-value] 1] - r config set zset-max-ziplist-value 10 - create_zset myzset $contents - assert_encoding $type myzset - - # create a dict for easy lookup - set mydict [dict create {*}[r zrange myzset 0 -1 withscores]] - - # We'll stress different parts of the code, see the implementation - # of ZRANDMEMBER for more information, but basically there are - # four different code paths. - - # PATH 1: Use negative count. - - # 1) Check that it returns repeated elements with and without values. - # 2) Check that all the elements actually belong to the original zset. - set res [r zrandmember myzset -20] - assert_equal [llength $res] 20 - check_member $mydict $res - - set res [r zrandmember myzset -1001] - assert_equal [llength $res] 1001 - check_member $mydict $res - - # again with WITHSCORES - set res [r zrandmember myzset -20 withscores] - assert_equal [llength $res] 40 - check_member_and_score $mydict $res - - set res [r zrandmember myzset -1001 withscores] - assert_equal [llength $res] 2002 - check_member_and_score $mydict $res - - # Test random uniform distribution - # df = 9, 40 means 0.00001 probability - set res [r zrandmember myzset -1000] - assert_lessthan [chi_square_value $res] 40 - check_member $mydict $res - - # 3) Check that eventually all the elements are returned. - # Use both WITHSCORES and without - unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - if {[expr {$iterations % 2}] == 0} { - set res [r zrandmember myzset -3 withscores] - foreach {key val} $res { - dict append auxset $key $val - } - } else { - set res [r zrandmember myzset -3] - foreach key $res { - dict append auxset $key - } - } - if {[lsort [dict keys $mydict]] eq - [lsort [dict keys $auxset]]} { - break; - } - } - assert {$iterations != 0} - - # PATH 2: positive count (unique behavior) with requested size - # equal or greater than set size. - foreach size {10 20} { - set res [r zrandmember myzset $size] - assert_equal [llength $res] 10 - assert_equal [lsort $res] [lsort [dict keys $mydict]] - check_member $mydict $res - - # again with WITHSCORES - set res [r zrandmember myzset $size withscores] - assert_equal [llength $res] 20 - assert_equal [lsort $res] [lsort $mydict] - check_member_and_score $mydict $res - } - - # PATH 3: Ask almost as elements as there are in the set. - # In this case the implementation will duplicate the original - # set and will remove random elements up to the requested size. - # - # PATH 4: Ask a number of elements definitely smaller than - # the set size. - # - # We can test both the code paths just changing the size but - # using the same code. - foreach size {1 2 8} { - # 1) Check that all the elements actually belong to the - # original set. - set res [r zrandmember myzset $size] - assert_equal [llength $res] $size - check_member $mydict $res - - # again with WITHSCORES - set res [r zrandmember myzset $size withscores] - assert_equal [llength $res] [expr {$size * 2}] - check_member_and_score $mydict $res - - # 2) Check that eventually all the elements are returned. 
- # Use both WITHSCORES and without - unset -nocomplain auxset - unset -nocomplain allkey - set iterations [expr {1000 / $size}] - set all_ele_return false - while {$iterations != 0} { - incr iterations -1 - if {[expr {$iterations % 2}] == 0} { - set res [r zrandmember myzset $size withscores] - foreach {key value} $res { - dict append auxset $key $value - lappend allkey $key - } - } else { - set res [r zrandmember myzset $size] - foreach key $res { - dict append auxset $key - lappend allkey $key - } - } - if {[lsort [dict keys $mydict]] eq - [lsort [dict keys $auxset]]} { - set all_ele_return true - } - } - assert_equal $all_ele_return true - # df = 9, 40 means 0.00001 probability - assert_lessthan [chi_square_value $allkey] 40 - } - } - r config set zset-max-ziplist-value $original_max_value - } - - test {zset score double range} { - set dblmax 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.00000000000000000 - r del zz - r zadd zz $dblmax dblmax - assert_encoding listpack zz - r zscore zz dblmax - } {1.7976931348623157e+308} - - test {zunionInterDiffGenericCommand acts on SET and ZSET} { - r del set_small{t} set_big{t} zset_small{t} zset_big{t} zset_dest{t} - - foreach set_type {intset listpack hashtable} { - # Restore all default configurations before each round of testing. - r config set set-max-intset-entries 512 - r config set set-max-listpack-entries 128 - r config set zset-max-listpack-entries 128 - - r del set_small{t} set_big{t} - - if {$set_type == "intset"} { - r sadd set_small{t} 1 2 3 - r sadd set_big{t} 1 2 3 4 5 - assert_encoding intset set_small{t} - assert_encoding intset set_big{t} - } elseif {$set_type == "listpack"} { - # Add an "a" and then remove it, make sure the set is listpack encoding. - r sadd set_small{t} a 1 2 3 - r sadd set_big{t} a 1 2 3 4 5 - r srem set_small{t} a - r srem set_big{t} a - assert_encoding listpack set_small{t} - assert_encoding listpack set_big{t} - } elseif {$set_type == "hashtable"} { - r config set set-max-intset-entries 0 - r config set set-max-listpack-entries 0 - r sadd set_small{t} 1 2 3 - r sadd set_big{t} 1 2 3 4 5 - assert_encoding hashtable set_small{t} - assert_encoding hashtable set_big{t} - } - - foreach zset_type {listpack skiplist} { - r del zset_small{t} zset_big{t} - - if {$zset_type == "listpack"} { - r zadd zset_small{t} 1 1 2 2 3 3 - r zadd zset_big{t} 1 1 2 2 3 3 4 4 5 5 - assert_encoding listpack zset_small{t} - assert_encoding listpack zset_big{t} - } elseif {$zset_type == "skiplist"} { - r config set zset-max-listpack-entries 0 - r zadd zset_small{t} 1 1 2 2 3 3 - r zadd zset_big{t} 1 1 2 2 3 3 4 4 5 5 - assert_encoding skiplist zset_small{t} - assert_encoding skiplist zset_big{t} - } - - # Test one key is big and one key is small separately. - # The reason for this is because we will sort the sets from smallest to largest. - # So set one big key and one small key, then the test can cover more code paths. - foreach {small_or_big set_key zset_key} { - small set_small{t} zset_big{t} - big set_big{t} zset_small{t} - } { - # The result of these commands are not related to the order of the keys. 
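# Plain SET members take part in ZUNION/ZINTER with an implicit score of 1
# (under the default WEIGHTS and SUM aggregation), which is why the assertions
# below only compare member lists.  A small illustration via the suite's "r"
# helper; s{t}/z{t}/dst{t} are hypothetical scratch keys, not used elsewhere:
r del s{t} z{t} dst{t}
r sadd s{t} 1 2 3
r zadd z{t} 5 3
r zunionstore dst{t} 2 s{t} z{t}
# r zscore dst{t} 3  ;# 6  (1 from the SET + 5 from the ZSET, SUM aggregate)
# r zscore dst{t} 1  ;# 1  (SET member only)
r del s{t} z{t} dst{t}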
- assert_equal {1 2 3 4 5} [lsort [r zunion 2 $set_key $zset_key]] - assert_equal {5} [r zunionstore zset_dest{t} 2 $set_key $zset_key] - assert_equal {1 2 3} [lsort [r zinter 2 $set_key $zset_key]] - assert_equal {3} [r zinterstore zset_dest{t} 2 $set_key $zset_key] - assert_equal {3} [r zintercard 2 $set_key $zset_key] - - # The result of sdiff is related to the order of the keys. - if {$small_or_big == "small"} { - assert_equal {} [r zdiff 2 $set_key $zset_key] - assert_equal {0} [r zdiffstore zset_dest{t} 2 $set_key $zset_key] - } else { - assert_equal {4 5} [lsort [r zdiff 2 $set_key $zset_key]] - assert_equal {2} [r zdiffstore zset_dest{t} 2 $set_key $zset_key] - } - } - } - } - - r config set set-max-intset-entries 512 - r config set set-max-listpack-entries 128 - r config set zset-max-listpack-entries 128 - } - - foreach type {single multiple single_multiple} { - test "ZADD overflows the maximum allowed elements in a listpack - $type" { - r del myzset - - set max_entries 64 - set original_max [lindex [r config get zset-max-listpack-entries] 1] - r config set zset-max-listpack-entries $max_entries - - if {$type == "single"} { - # All are single zadd commands. - for {set i 0} {$i < $max_entries} {incr i} { r zadd myzset $i $i } - } elseif {$type == "multiple"} { - # One zadd command to add all elements. - set args {} - for {set i 0} {$i < $max_entries * 2} {incr i} { lappend args $i } - r zadd myzset {*}$args - } elseif {$type == "single_multiple"} { - # First one zadd adds an element (creates a key) and then one zadd adds all elements. - r zadd myzset 1 1 - set args {} - for {set i 0} {$i < $max_entries * 2} {incr i} { lappend args $i } - r zadd myzset {*}$args - } - - assert_encoding listpack myzset - assert_equal $max_entries [r zcard myzset] - assert_equal 1 [r zadd myzset 1 b] - assert_encoding skiplist myzset - - r config set zset-max-listpack-entries $original_max - } - } -} \ No newline at end of file