diff --git a/Cargo.lock b/Cargo.lock
index 51381e82037a4..7183c990bfce9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1751,7 +1751,7 @@ dependencies = [
"http 1.1.0",
"http-body 1.0.0",
"http-body-util",
- "hyper 1.1.0",
+ "hyper 1.4.1",
"hyper-util",
"itoa",
"matchit",
@@ -2791,22 +2791,22 @@ dependencies = [
[[package]]
name = "console-api"
-version = "0.7.0"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a257c22cd7e487dd4a13d413beabc512c5052f0bc048db0da6a84c3d8a6142fd"
+checksum = "86ed14aa9c9f927213c6e4f3ef75faaad3406134efe84ba2cb7983431d5f0931"
dependencies = [
"futures-core",
- "prost 0.12.1",
- "prost-types 0.12.1",
- "tonic 0.11.0",
+ "prost 0.13.1",
+ "prost-types 0.13.1",
+ "tonic 0.12.1",
"tracing-core",
]
[[package]]
name = "console-subscriber"
-version = "0.3.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31c4cc54bae66f7d9188996404abdf7fdfa23034ef8e43478c8810828abad758"
+checksum = "e2e3a111a37f3333946ebf9da370ba5c5577b18eb342ec683eb488dd21980302"
dependencies = [
"console-api",
"crossbeam-channel",
@@ -2814,14 +2814,15 @@ dependencies = [
"futures-task",
"hdrhistogram",
"humantime",
- "prost 0.12.1",
- "prost-types 0.12.1",
+ "hyper-util",
+ "prost 0.13.1",
+ "prost-types 0.13.1",
"serde",
"serde_json",
"thread_local",
"tokio",
- "tokio-stream",
- "tonic 0.11.0",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tonic 0.12.1",
"tracing",
"tracing-core",
"tracing-subscriber",
@@ -4436,15 +4437,15 @@ dependencies = [
[[package]]
name = "etcd-client"
-version = "0.12.4"
+version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ae697f3928e8c89ae6f4dcf788059f49fd01a76dc53e63628f5a33881f5715e"
+checksum = "39bde3ce50a626efeb1caa9ab1083972d178bebb55ca627639c8ded507dfcbde"
dependencies = [
- "http 0.2.9",
- "prost 0.12.1",
+ "http 1.1.0",
+ "prost 0.13.1",
"tokio",
- "tokio-stream",
- "tonic 0.10.2",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tonic 0.12.1",
"tonic-build",
"tower",
"tower-service",
@@ -5185,7 +5186,7 @@ dependencies = [
"thiserror",
"time",
"tokio",
- "tokio-stream",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"url",
"yup-oauth2",
]
@@ -5301,9 +5302,9 @@ dependencies = [
[[package]]
name = "google-cloud-auth"
-version = "0.15.0"
+version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e09ed5b2998bc8d0d3df09c859028210d4961b8fe779cfda8dc8ca4e83d5def2"
+checksum = "1112c453c2e155b3e683204ffff52bcc6d6495d04b68d9e90cd24161270c5058"
dependencies = [
"async-trait",
"base64 0.21.7",
@@ -5323,9 +5324,9 @@ dependencies = [
[[package]]
name = "google-cloud-bigquery"
-version = "0.9.0"
+version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e321c127945bb44a5cf5129c37530e2494b97afefe7f334a983ac754e40914e"
+checksum = "305cb7214d11b719e9f00f982c1ee1304c674f7a8dfc44a43b8bad3c909750c2"
dependencies = [
"anyhow",
"arrow 50.0.0",
@@ -5350,29 +5351,29 @@ dependencies = [
[[package]]
name = "google-cloud-gax"
-version = "0.17.0"
+version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cb60314136e37de9e2a05ddb427b9c5a39c3d188de2e2f026c6af74425eef44"
+checksum = "9c3eaaad103912825594d674a4b1e556ccbb05a13a6cac17dcfd871997fb760a"
dependencies = [
"google-cloud-token",
- "http 0.2.9",
+ "http 1.1.0",
"thiserror",
"tokio",
"tokio-retry",
- "tonic 0.10.2",
+ "tonic 0.12.1",
"tower",
"tracing",
]
[[package]]
name = "google-cloud-googleapis"
-version = "0.13.0"
+version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32cd184c52aa2619ac1b16ad8b5a752e91d25be88a8cf08eaec19777dfacbe54"
+checksum = "0ae8ab26ef7c7c3f7dfb9cc3982293d031d8e78c85d00ddfb704b5c35aeff7c8"
dependencies = [
- "prost 0.12.1",
- "prost-types 0.12.1",
- "tonic 0.10.2",
+ "prost 0.13.1",
+ "prost-types 0.13.1",
+ "tonic 0.12.1",
]
[[package]]
@@ -5388,9 +5389,9 @@ dependencies = [
[[package]]
name = "google-cloud-pubsub"
-version = "0.25.0"
+version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a35e4a008db5cf01a5c03d3c67bd90b3cad77427ca949f3c8eddd90c4a3c932"
+checksum = "55ef73601dcec5ea144e59969e921d35d66000211603fee8023b7947af09248f"
dependencies = [
"async-channel 1.9.0",
"async-stream",
@@ -5398,7 +5399,7 @@ dependencies = [
"google-cloud-gax",
"google-cloud-googleapis",
"google-cloud-token",
- "prost-types 0.12.1",
+ "prost-types 0.13.1",
"thiserror",
"tokio",
"tokio-util",
@@ -5407,9 +5408,9 @@ dependencies = [
[[package]]
name = "google-cloud-token"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcd62eb34e3de2f085bcc33a09c3e17c4f65650f36d53eb328b00d63bcb536a"
+checksum = "8f49c12ba8b21d128a2ce8585955246977fbce4415f680ebf9199b6f9d6d725f"
dependencies = [
"async-trait",
]
@@ -5595,9 +5596,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
-version = "0.3.2"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "hex"
@@ -5760,9 +5761,9 @@ dependencies = [
[[package]]
name = "hyper"
-version = "1.1.0"
+version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75"
+checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05"
dependencies = [
"bytes",
"futures-channel",
@@ -5774,6 +5775,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
+ "smallvec",
"tokio",
"want",
]
@@ -5803,7 +5805,7 @@ checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c"
dependencies = [
"futures-util",
"http 1.1.0",
- "hyper 1.1.0",
+ "hyper 1.4.1",
"hyper-util",
"rustls 0.22.4",
"rustls-pki-types",
@@ -5824,6 +5826,19 @@ dependencies = [
"tokio-io-timeout",
]
+[[package]]
+name = "hyper-timeout"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793"
+dependencies = [
+ "hyper 1.4.1",
+ "hyper-util",
+ "pin-project-lite",
+ "tokio",
+ "tower-service",
+]
+
[[package]]
name = "hyper-tls"
version = "0.5.0"
@@ -5845,7 +5860,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
dependencies = [
"bytes",
"http-body-util",
- "hyper 1.1.0",
+ "hyper 1.4.1",
"hyper-util",
"native-tls",
"tokio",
@@ -5855,16 +5870,16 @@ dependencies = [
[[package]]
name = "hyper-util"
-version = "0.1.3"
+version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa"
+checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956"
dependencies = [
"bytes",
"futures-channel",
"futures-util",
"http 1.1.0",
"http-body 1.0.0",
- "hyper 1.1.0",
+ "hyper 1.4.1",
"pin-project-lite",
"socket2 0.5.6",
"tokio",
@@ -5977,8 +5992,8 @@ dependencies = [
[[package]]
name = "icelake"
-version = "0.0.10"
-source = "git+https://github.com/icelake-io/icelake?rev=07d53893d7788b4e41fc11efad8a6be828405c31#07d53893d7788b4e41fc11efad8a6be828405c31"
+version = "0.3.141592654"
+source = "git+https://github.com/risingwavelabs/icelake.git?rev=1860eb315183a5f3f72b4097c1e40d49407f8373#1860eb315183a5f3f72b4097c1e40d49407f8373"
dependencies = [
"anyhow",
"apache-avro 0.17.0",
@@ -6814,13 +6829,13 @@ dependencies = [
[[package]]
name = "madsim-etcd-client"
-version = "0.4.0+0.12.1"
+version = "0.6.0+0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02b4b5de48bb7f3f7eae0bca62b3ed0b7d714b1b273d7347329b92c3a2eef113"
+checksum = "8edcf23498cb590e415ce2ba6c7f186c7aa3340e7aa716ddddb34faf0a9ffdfb"
dependencies = [
"etcd-client",
"futures-util",
- "http 0.2.9",
+ "http 1.1.0",
"madsim",
"serde",
"serde_with 3.8.0",
@@ -6828,7 +6843,7 @@ dependencies = [
"thiserror",
"tokio",
"toml 0.8.12",
- "tonic 0.10.2",
+ "tonic 0.12.1",
"tracing",
]
@@ -6881,29 +6896,29 @@ dependencies = [
[[package]]
name = "madsim-tonic"
-version = "0.4.1+0.10.0"
+version = "0.5.1+0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "813977c7870103e113a0332d97731f961bc48aaa8860edd318ef7d7754214436"
+checksum = "61c668c82f0c2aca7ffed3235047f2539e6e41278c7c47a822999f3b7a067887"
dependencies = [
"async-stream",
"chrono",
"futures-util",
"madsim",
"tokio",
- "tonic 0.10.2",
+ "tonic 0.12.1",
"tower",
"tracing",
]
[[package]]
name = "madsim-tonic-build"
-version = "0.4.2+0.10.0"
+version = "0.5.0+0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a2ad2776ba20221ccbe4e136e2fa0f7ab90eebd608373177f3e74a198a288ec"
+checksum = "f271a476bbaa9d2139e1e1a5beb869c6119e805a0b67ad2b2857e4a8785b111a"
dependencies = [
"prettyplease 0.2.15",
"proc-macro2",
- "prost-build 0.12.1",
+ "prost-build 0.13.1",
"quote",
"syn 2.0.66",
"tonic-build",
@@ -7851,15 +7866,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
-[[package]]
-name = "openssl-src"
-version = "300.3.1+3.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91"
-dependencies = [
- "cc",
-]
-
[[package]]
name = "openssl-sys"
version = "0.9.103"
@@ -7868,7 +7874,6 @@ checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6"
dependencies = [
"cc",
"libc",
- "openssl-src",
"pkg-config",
"vcpkg",
]
@@ -7942,21 +7947,7 @@ dependencies = [
"rand",
"thiserror",
"tokio",
- "tokio-stream",
-]
-
-[[package]]
-name = "opentls"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f561874f8d6ecfb674fc08863414040c93cc90c0b6963fe679895fab8b65560"
-dependencies = [
- "futures-util",
- "log",
- "openssl",
- "openssl-probe",
- "openssl-sys",
- "url",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -8009,20 +8000,20 @@ dependencies = [
[[package]]
name = "otlp-embedded"
version = "0.0.1"
-source = "git+https://github.com/risingwavelabs/otlp-embedded?rev=492c244e0be91feb659c0cd48a624bbd96045a33#492c244e0be91feb659c0cd48a624bbd96045a33"
+source = "git+https://github.com/risingwavelabs/otlp-embedded?rev=e6cd165b9bc85783b42c106e99186b86b73e3507#e6cd165b9bc85783b42c106e99186b86b73e3507"
dependencies = [
"axum 0.7.4",
"datasize",
"hex",
- "itertools 0.12.1",
- "madsim-tonic",
- "madsim-tonic-build",
- "prost 0.12.1",
+ "itertools 0.13.0",
+ "prost 0.13.1",
"rust-embed",
"schnellru",
"serde",
"serde_json",
"tokio",
+ "tonic 0.12.1",
+ "tonic-build",
"tracing",
]
@@ -8959,6 +8950,16 @@ dependencies = [
"prost-derive 0.12.1",
]
+[[package]]
+name = "prost"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc"
+dependencies = [
+ "bytes",
+ "prost-derive 0.13.1",
+]
+
[[package]]
name = "prost-build"
version = "0.11.9"
@@ -9003,6 +9004,27 @@ dependencies = [
"which",
]
+[[package]]
+name = "prost-build"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1"
+dependencies = [
+ "bytes",
+ "heck 0.5.0",
+ "itertools 0.13.0",
+ "log",
+ "multimap 0.10.0",
+ "once_cell",
+ "petgraph",
+ "prettyplease 0.2.15",
+ "prost 0.13.1",
+ "prost-types 0.13.1",
+ "regex",
+ "syn 2.0.66",
+ "tempfile",
+]
+
[[package]]
name = "prost-derive"
version = "0.11.9"
@@ -9029,6 +9051,19 @@ dependencies = [
"syn 2.0.66",
]
+[[package]]
+name = "prost-derive"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca"
+dependencies = [
+ "anyhow",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.66",
+]
+
[[package]]
name = "prost-helpers"
version = "0.1.0"
@@ -9040,13 +9075,13 @@ dependencies = [
[[package]]
name = "prost-reflect"
-version = "0.13.0"
+version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ae9372e3227f3685376a0836e5c248611eafc95a0be900d44bc6cdf225b700f"
+checksum = "55a6a9143ae25c25fa7b6a48d6cc08b10785372060009c25140a4e7c340e95af"
dependencies = [
"once_cell",
- "prost 0.12.1",
- "prost-types 0.12.1",
+ "prost 0.13.1",
+ "prost-types 0.13.1",
]
[[package]]
@@ -9067,6 +9102,15 @@ dependencies = [
"prost 0.12.1",
]
+[[package]]
+name = "prost-types"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2"
+dependencies = [
+ "prost 0.13.1",
+]
+
[[package]]
name = "protobuf"
version = "2.28.0"
@@ -9184,7 +9228,7 @@ dependencies = [
"indoc",
"libc",
"memoffset",
- "parking_lot 0.11.2",
+ "parking_lot 0.12.1",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
@@ -9647,7 +9691,7 @@ dependencies = [
"http 1.1.0",
"http-body 1.0.0",
"http-body-util",
- "hyper 1.1.0",
+ "hyper 1.4.1",
"hyper-rustls 0.26.0",
"hyper-tls 0.6.0",
"hyper-util",
@@ -9837,7 +9881,7 @@ dependencies = [
"bytes",
"itertools 0.12.1",
"parking_lot 0.12.1",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_hummock_sdk",
"risingwave_meta_model_v2",
@@ -9879,7 +9923,7 @@ dependencies = [
"parquet 52.0.0",
"paste",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"risingwave_common",
"risingwave_common_estimate_size",
@@ -9899,7 +9943,7 @@ dependencies = [
"thiserror-ext",
"tikv-jemallocator",
"tokio-metrics",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tokio-util",
"tracing",
"twox-hash",
@@ -9942,7 +9986,7 @@ dependencies = [
"serde",
"serde_yaml",
"thiserror-ext",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"toml 0.8.12",
"tracing",
"tracing-subscriber",
@@ -10048,7 +10092,7 @@ dependencies = [
"governor",
"hashbrown 0.14.3",
"hex",
- "http 0.2.9",
+ "http 1.1.0",
"http-body 0.4.5",
"humantime",
"hytra",
@@ -10074,7 +10118,7 @@ dependencies = [
"pretty_assertions",
"procfs 0.16.0",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"regex",
"reqwest 0.12.4",
@@ -10156,14 +10200,18 @@ dependencies = [
name = "risingwave_common_metrics"
version = "1.11.0-alpha"
dependencies = [
+ "auto_impl",
"bytes",
"clap",
"darwin-libproc",
"easy-ext",
"futures",
"http 0.2.9",
- "http-body 0.4.5",
+ "http 1.1.0",
+ "http-body 1.0.0",
"hyper 0.14.27",
+ "hyper 1.4.1",
+ "hyper-util",
"hytra",
"itertools 0.12.1",
"libc",
@@ -10204,7 +10252,7 @@ dependencies = [
"anyhow",
"bincode 1.3.3",
"parking_lot 0.12.1",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_pb",
"serde",
"thiserror",
@@ -10219,7 +10267,7 @@ dependencies = [
"async-trait",
"axum 0.7.4",
"futures",
- "hyper 0.14.27",
+ "http 1.1.0",
"madsim-tokio",
"madsim-tonic",
"prometheus",
@@ -10273,7 +10321,7 @@ dependencies = [
"madsim-tokio",
"madsim-tonic",
"parking_lot 0.12.1",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_common_heap_profiling",
"risingwave_common_service",
@@ -10299,14 +10347,15 @@ dependencies = [
"foyer",
"futures",
"futures-async-stream",
- "hyper 0.14.27",
+ "http 1.1.0",
+ "hyper 1.4.1",
"itertools 0.12.1",
"madsim-tokio",
"madsim-tonic",
"maplit",
"pprof",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"risingwave_batch",
"risingwave_common",
@@ -10325,7 +10374,7 @@ dependencies = [
"tempfile",
"thiserror-ext",
"tikv-jemalloc-ctl",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tower",
"tracing",
"uuid",
@@ -10412,10 +10461,10 @@ dependencies = [
"postgres-openssl",
"pretty_assertions",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"prost-build 0.12.1",
"prost-reflect",
- "prost-types 0.12.1",
+ "prost-types 0.13.1",
"protobuf-native",
"protobuf-src",
"pulsar",
@@ -10455,7 +10504,7 @@ dependencies = [
"time",
"tokio-postgres",
"tokio-retry",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tokio-util",
"tracing",
"tracing-subscriber",
@@ -10513,7 +10562,7 @@ dependencies = [
"madsim-tokio",
"madsim-tonic",
"memcomparable",
- "prost 0.12.1",
+ "prost 0.13.1",
"regex",
"risingwave_common",
"risingwave_connector",
@@ -10753,7 +10802,7 @@ dependencies = [
"pretty-xmlish",
"pretty_assertions",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"risingwave_batch",
"risingwave_common",
@@ -10780,7 +10829,7 @@ dependencies = [
"tempfile",
"thiserror",
"thiserror-ext",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tracing",
"uuid",
"workspace-hack",
@@ -10805,7 +10854,7 @@ dependencies = [
"hex",
"itertools 0.12.1",
"parse-display",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_common_estimate_size",
"risingwave_pb",
@@ -10863,7 +10912,7 @@ dependencies = [
"madsim-tokio",
"mockall",
"parking_lot 0.12.1",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_hummock_sdk",
"risingwave_pb",
@@ -10882,7 +10931,7 @@ dependencies = [
"futures",
"jni",
"madsim-tokio",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_expr",
"risingwave_hummock_sdk",
@@ -10912,7 +10961,7 @@ dependencies = [
"jni",
"madsim-tokio",
"paste",
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_expr",
"risingwave_hummock_sdk",
@@ -10979,7 +11028,7 @@ dependencies = [
"function_name",
"futures",
"hex",
- "hyper 0.14.27",
+ "http 1.1.0",
"itertools 0.12.1",
"jsonbb",
"madsim-etcd-client",
@@ -10994,7 +11043,7 @@ dependencies = [
"parking_lot 0.12.1",
"prometheus",
"prometheus-http-query",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"risingwave_backup",
"risingwave_common",
@@ -11021,7 +11070,7 @@ dependencies = [
"thiserror",
"thiserror-ext",
"tokio-retry",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tower",
"tower-http",
"tracing",
@@ -11067,7 +11116,7 @@ dependencies = [
name = "risingwave_meta_model_v2"
version = "1.11.0-alpha"
dependencies = [
- "prost 0.12.1",
+ "prost 0.13.1",
"risingwave_common",
"risingwave_hummock_sdk",
"risingwave_pb",
@@ -11121,7 +11170,7 @@ dependencies = [
"itertools 0.12.1",
"madsim-tokio",
"madsim-tonic",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"regex",
"risingwave_common",
@@ -11134,7 +11183,7 @@ dependencies = [
"serde_json",
"sync-point",
"thiserror-ext",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tracing",
"workspace-hack",
]
@@ -11186,8 +11235,8 @@ dependencies = [
"madsim-tonic-build",
"pbjson",
"pbjson-build",
- "prost 0.12.1",
- "prost-build 0.12.1",
+ "prost 0.13.1",
+ "prost-build 0.13.1",
"prost-helpers",
"risingwave_error",
"serde",
@@ -11242,8 +11291,8 @@ dependencies = [
"easy-ext",
"either",
"futures",
- "http 0.2.9",
- "hyper 0.14.27",
+ "http 1.1.0",
+ "hyper 1.4.1",
"itertools 0.12.1",
"lru 0.7.6",
"madsim-tokio",
@@ -11260,7 +11309,7 @@ dependencies = [
"thiserror",
"thiserror-ext",
"tokio-retry",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tower",
"tracing",
"url",
@@ -11291,6 +11340,7 @@ dependencies = [
"rlimit",
"thiserror-ext",
"time",
+ "tokio",
"tracing",
"tracing-opentelemetry",
"tracing-subscriber",
@@ -11345,7 +11395,7 @@ dependencies = [
"tempfile",
"tikv-jemallocator",
"tokio-postgres",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tracing",
"tracing-subscriber",
]
@@ -11413,7 +11463,7 @@ dependencies = [
"serde",
"serde_with 3.8.0",
"tokio-postgres",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"toml 0.8.12",
"tracing",
"workspace-hack",
@@ -11460,7 +11510,7 @@ dependencies = [
"parking_lot 0.12.1",
"procfs 0.16.0",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"risingwave_backup",
"risingwave_common",
@@ -11529,7 +11579,7 @@ dependencies = [
"pin-project",
"prehash",
"prometheus",
- "prost 0.12.1",
+ "prost 0.13.1",
"rand",
"risingwave_common",
"risingwave_common_estimate_size",
@@ -11554,7 +11604,7 @@ dependencies = [
"thiserror-ext",
"tokio-metrics",
"tokio-retry",
- "tokio-stream",
+ "tokio-stream 0.1.15 (git+https://github.com/madsim-rs/tokio.git?rev=0dd1055)",
"tracing",
"tracing-test",
"workspace-hack",
@@ -12100,9 +12150,9 @@ dependencies = [
[[package]]
name = "schnellru"
-version = "0.2.1"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d"
+checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367"
dependencies = [
"ahash 0.8.11",
"cfg-if",
@@ -13145,7 +13195,7 @@ dependencies = [
"thiserror",
"time",
"tokio",
- "tokio-stream",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tracing",
"url",
"uuid",
@@ -13740,13 +13790,15 @@ dependencies = [
"futures-util",
"num-traits",
"once_cell",
- "opentls",
"pin-project-lite",
"pretty-hex",
"rust_decimal",
+ "rustls-native-certs 0.6.3",
+ "rustls-pemfile 1.0.4",
"thiserror",
"time",
"tokio",
+ "tokio-rustls 0.24.1",
"tokio-util",
"tracing",
"uuid",
@@ -13849,9 +13901,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.37.0"
+version = "1.38.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
+checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
dependencies = [
"backtrace",
"bytes",
@@ -13879,9 +13931,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
-version = "2.2.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
dependencies = [
"proc-macro2",
"quote",
@@ -13897,7 +13949,7 @@ dependencies = [
"futures-util",
"pin-project-lite",
"tokio",
- "tokio-stream",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -13991,8 +14043,19 @@ dependencies = [
[[package]]
name = "tokio-stream"
-version = "0.1.14"
-source = "git+https://github.com/madsim-rs/tokio.git?rev=fe39bb8e#fe39bb8e8ab0ed96ee1b4477ab5508c20ce017fb"
+version = "0.1.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-stream"
+version = "0.1.15"
+source = "git+https://github.com/madsim-rs/tokio.git?rev=0dd1055#0dd105567b323c863c29f794d2221ed588956d8d"
dependencies = [
"futures-core",
"madsim-tokio",
@@ -14095,12 +14158,11 @@ dependencies = [
"axum 0.6.20",
"base64 0.21.7",
"bytes",
- "flate2",
"h2 0.3.26",
"http 0.2.9",
"http-body 0.4.5",
"hyper 0.14.27",
- "hyper-timeout",
+ "hyper-timeout 0.4.1",
"percent-encoding",
"pin-project",
"prost 0.12.1",
@@ -14108,12 +14170,11 @@ dependencies = [
"rustls-pemfile 1.0.4",
"tokio",
"tokio-rustls 0.24.1",
- "tokio-stream",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tower",
"tower-layer",
"tower-service",
"tracing",
- "webpki-roots 0.25.2",
]
[[package]]
@@ -14131,27 +14192,61 @@ dependencies = [
"http 0.2.9",
"http-body 0.4.5",
"hyper 0.14.27",
- "hyper-timeout",
+ "hyper-timeout 0.4.1",
"percent-encoding",
"pin-project",
"prost 0.12.1",
"tokio",
- "tokio-stream",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"tower",
"tower-layer",
"tower-service",
"tracing",
]
+[[package]]
+name = "tonic"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401"
+dependencies = [
+ "async-stream",
+ "async-trait",
+ "axum 0.7.4",
+ "base64 0.22.0",
+ "bytes",
+ "flate2",
+ "h2 0.4.4",
+ "http 1.1.0",
+ "http-body 1.0.0",
+ "http-body-util",
+ "hyper 1.4.1",
+ "hyper-timeout 0.5.1",
+ "hyper-util",
+ "percent-encoding",
+ "pin-project",
+ "prost 0.13.1",
+ "rustls-pemfile 2.1.1",
+ "socket2 0.5.6",
+ "tokio",
+ "tokio-rustls 0.26.0",
+ "tokio-stream 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tower",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+ "webpki-roots 0.26.1",
+]
+
[[package]]
name = "tonic-build"
-version = "0.10.2"
+version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
+checksum = "568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964"
dependencies = [
"prettyplease 0.2.15",
"proc-macro2",
- "prost-build 0.12.1",
+ "prost-build 0.13.1",
"quote",
"syn 2.0.66",
]
diff --git a/Cargo.toml b/Cargo.toml
index 53dfaef0ac595..5bfab4feb27fb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,7 +121,7 @@ aws-smithy-types = { version = "1", default-features = false, features = [
aws-endpoint = "0.60"
aws-types = "1"
axum = "=0.7.4" # TODO: 0.7.5+ does not work with current toolchain
-etcd-client = { package = "madsim-etcd-client", version = "0.4" }
+etcd-client = { package = "madsim-etcd-client", version = "0.6" }
futures-async-stream = "0.2.9"
hytra = "0.1"
rdkafka = { package = "madsim-rdkafka", version = "0.4.1", features = [
@@ -129,12 +129,12 @@ rdkafka = { package = "madsim-rdkafka", version = "0.4.1", features = [
] }
hashbrown = { version = "0.14", features = ["ahash", "inline-more", "nightly"] }
criterion = { version = "0.5", features = ["async_futures"] }
-tonic = { package = "madsim-tonic", version = "0.4.1" }
-tonic-build = { package = "madsim-tonic-build", version = "0.4.2" }
-otlp-embedded = { git = "https://github.com/risingwavelabs/otlp-embedded", rev = "492c244e0be91feb659c0cd48a624bbd96045a33" }
-prost = { version = "0.12" }
-prost-build = { version = "0.12" }
-icelake = { git = "https://github.com/icelake-io/icelake", rev = "07d53893d7788b4e41fc11efad8a6be828405c31", features = [
+tonic = { package = "madsim-tonic", version = "0.5.1" }
+tonic-build = { package = "madsim-tonic-build", version = "0.5" }
+otlp-embedded = { git = "https://github.com/risingwavelabs/otlp-embedded", rev = "e6cd165b9bc85783b42c106e99186b86b73e3507" }
+prost = { version = "0.13" }
+prost-build = { version = "0.13" }
+icelake = { git = "https://github.com/risingwavelabs/icelake.git", rev = "1860eb315183a5f3f72b4097c1e40d49407f8373", features = [
"prometheus",
] }
arrow-array-iceberg = { package = "arrow-array", version = "52" }
@@ -180,6 +180,7 @@ tikv-jemallocator = { git = "https://github.com/risingwavelabs/jemallocator.git"
"profiling",
"stats",
], rev = "64a2d9" }
+# TODO(http-bump): bump to use tonic 0.12 once minitrace-opentelemetry is updated
opentelemetry = "0.23"
opentelemetry-otlp = "0.16"
opentelemetry_sdk = { version = "0.23", default-features = false }
@@ -195,6 +196,7 @@ sea-orm = { version = "0.12.14", features = [
"runtime-tokio-native-tls",
] }
sqlx = "0.7"
+tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0dd1055", features = ["net", "fs"] }
tokio-util = "0.7"
tracing-opentelemetry = "0.24"
rand = { version = "0.8", features = ["small_rng"] }
@@ -335,7 +337,9 @@ opt-level = 2
# Patch third-party crates for deterministic simulation.
quanta = { git = "https://github.com/madsim-rs/quanta.git", rev = "948bdc3" }
getrandom = { git = "https://github.com/madsim-rs/getrandom.git", rev = "e79a7ae" }
-tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "fe39bb8e" }
+# Don't patch `tokio-stream`, but only use the madsim version for **direct** dependencies.
+# Imagine an unpatched dependency that depends on the original `tokio` and the patched `tokio-stream`.
+# tokio-stream = { git = "https://github.com/madsim-rs/tokio.git", rev = "0dd1055" }
tokio-retry = { git = "https://github.com/madsim-rs/rust-tokio-retry.git", rev = "95e2fd3" }
tokio-postgres = { git = "https://github.com/madsim-rs/rust-postgres.git", rev = "ac00d88" }
futures-timer = { git = "https://github.com/madsim-rs/futures-timer.git", rev = "05b33b4" }
diff --git a/README.md b/README.md
index bf50ae208a972..07d12e99223ef 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,7 @@
RisingWave is a Postgres-compatible SQL engine engineered to provide the simplest and most cost-efficient approach for processing, analyzing, and managing real-time event streaming data.
-![RisingWave](https://github.com/risingwavelabs/risingwave/assets/41638002/10c44404-f78b-43ce-bbd9-3646690acc59)
+![RisingWave](./docs/dev/src/images/architecture_20240814.png)
## When to use RisingWave?
RisingWave can ingest millions of events per second, continuously join live data streams with historical tables, and serve ad-hoc queries in real-time. Typical use cases include, but are not limited to:
diff --git a/ci/Dockerfile b/ci/Dockerfile
index b14454859a791..95dbb5205c754 100644
--- a/ci/Dockerfile
+++ b/ci/Dockerfile
@@ -11,7 +11,7 @@ ENV LANG en_US.utf8
# Use AWS ubuntu mirror
RUN sed -i 's|http://archive.ubuntu.com/ubuntu|http://us-east-2.ec2.archive.ubuntu.com/ubuntu/|g' /etc/apt/sources.list
RUN apt-get update -yy && \
- DEBIAN_FRONTEND=noninteractive apt-get -y install sudo make build-essential cmake protobuf-compiler curl parallel python3 python3-pip python3-venv software-properties-common \
+ DEBIAN_FRONTEND=noninteractive apt-get -y install sudo make build-essential cmake protobuf-compiler curl parallel python3 python3-pip python3-venv software-properties-common psmisc \
openssl libssl-dev libsasl2-dev libcurl4-openssl-dev pkg-config bash openjdk-17-jdk wget unzip git tmux lld postgresql-client kcat netcat-openbsd mysql-client \
maven zstd libzstd-dev locales \
python3.12 python3.12-dev \
diff --git a/ci/build-ci-image.sh b/ci/build-ci-image.sh
index 6602509824e05..88542b4aa5f12 100755
--- a/ci/build-ci-image.sh
+++ b/ci/build-ci-image.sh
@@ -10,7 +10,7 @@ cat ../rust-toolchain
# shellcheck disable=SC2155
# REMEMBER TO ALSO UPDATE ci/docker-compose.yml
-export BUILD_ENV_VERSION=v20240731
+export BUILD_ENV_VERSION=v20240812
export BUILD_TAG="public.ecr.aws/w1p7b4n3/rw-build-env:${BUILD_ENV_VERSION}"
diff --git a/ci/docker-compose.yml b/ci/docker-compose.yml
index 78ad69c0995ab..4b1954ff5ae2c 100644
--- a/ci/docker-compose.yml
+++ b/ci/docker-compose.yml
@@ -71,7 +71,7 @@ services:
retries: 5
source-test-env:
- image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240731
+ image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240812
depends_on:
- mysql
- sqlserver-server
@@ -85,7 +85,7 @@ services:
- ..:/risingwave
sink-test-env:
- image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240731
+ image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240812
depends_on:
- mysql
- db
@@ -108,12 +108,12 @@ services:
rw-build-env:
- image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240731
+ image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240812
volumes:
- ..:/risingwave
ci-flamegraph-env:
- image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240731
+ image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240812
# NOTE(kwannoel): This is used in order to permit
# syscalls for `nperf` (perf_event_open),
# so it can do CPU profiling.
@@ -124,7 +124,7 @@ services:
- ..:/risingwave
regress-test-env:
- image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240731
+ image: public.ecr.aws/w1p7b4n3/rw-build-env:v20240812
depends_on:
db:
condition: service_healthy
@@ -266,7 +266,6 @@ services:
SCHEMA_REGISTRY_HOST_NAME: schemaregistry
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry:8082
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: message_queue:29092
- SCHEMA_REGISTRY_DEBUG: 'true'
pulsar-server:
container_name: pulsar-server
diff --git a/ci/scripts/common.sh b/ci/scripts/common.sh
index ac64d1a7a89cc..176c10b4ebc4f 100755
--- a/ci/scripts/common.sh
+++ b/ci/scripts/common.sh
@@ -97,7 +97,6 @@ function filter_stack_trace() {
touch tmp
cat "$1" \
| sed -E '/ [1-9][0-9]+:/d' \
- | sed -E '/ [3-9]+:/d' \
| sed -E '/ at .rustc/d' \
| sed -E '/ at ...cargo/d' > tmp
cp tmp "$1"
diff --git a/ci/scripts/deterministic-recovery-test.sh b/ci/scripts/deterministic-recovery-test.sh
index 1a400d4ade9e0..2afe2f03b956b 100755
--- a/ci/scripts/deterministic-recovery-test.sh
+++ b/ci/scripts/deterministic-recovery-test.sh
@@ -9,7 +9,7 @@ echo "--- Download artifacts"
download-and-decompress-artifact risingwave_simulation .
chmod +x ./risingwave_simulation
-export RUST_LOG="risingwave_meta::barrier::recovery=debug,\
+export RUST_LOG="info,risingwave_meta::barrier::recovery=debug,\
risingwave_meta::manager::catalog=debug,\
risingwave_meta::rpc::ddl_controller=debug,\
risingwave_meta::barrier::mod=debug,\
diff --git a/ci/scripts/gen-integration-test-yaml.py b/ci/scripts/gen-integration-test-yaml.py
index c778205cfbb3e..e3a8e632709c1 100644
--- a/ci/scripts/gen-integration-test-yaml.py
+++ b/ci/scripts/gen-integration-test-yaml.py
@@ -65,6 +65,7 @@ def gen_pipeline_steps():
env:
GHCR_USERNAME: ghcr-username
GHCR_TOKEN: ghcr-token
+ RW_LICENSE_KEY: rw-license-key
- ./ci/plugins/docker-compose-logs
"""
return pipeline_steps
diff --git a/ci/scripts/run-e2e-test.sh b/ci/scripts/run-e2e-test.sh
index b4c00cec53fe8..c0f8e9f387d61 100755
--- a/ci/scripts/run-e2e-test.sh
+++ b/ci/scripts/run-e2e-test.sh
@@ -77,7 +77,7 @@ mv target/debug/risingwave_e2e_extended_mode_test-"$profile" target/debug/rising
chmod +x ./target/debug/risingwave_e2e_extended_mode_test
echo "--- e2e, $mode, streaming"
-RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info" \
+RUST_LOG="info,risingwave_stream=info,risingwave_batch=info,risingwave_storage=info,risingwave_stream::common::table::state_table=warn" \
cluster_start
# Please make sure the regression is expected before increasing the timeout.
sqllogictest -p 4566 -d dev './e2e_test/streaming/**/*.slt' --junit "streaming-${profile}"
diff --git a/ci/scripts/single-node-utils.sh b/ci/scripts/single-node-utils.sh
index f882084197af8..852cd2099c2fe 100755
--- a/ci/scripts/single-node-utils.sh
+++ b/ci/scripts/single-node-utils.sh
@@ -19,7 +19,7 @@ start_single_node() {
}
stop_single_node() {
- pkill risingwave
+ killall --wait risingwave
rm -rf "$HOME/.risingwave/state_store"
rm -rf "$HOME/.risingwave/meta_store"
}
@@ -47,7 +47,6 @@ wait_single_node() {
restart_single_node() {
stop_single_node
- sleep 5
start_single_node "$PREFIX_LOG"/single-node-restarted.log &
wait_single_node
}
diff --git a/ci/scripts/sql/nexmark/q0-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q0-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..ab23d53d1251d
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q0-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q0_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q0-temporal-filter.sql b/ci/scripts/sql/nexmark/q0-temporal-filter.sql
new file mode 100644
index 0000000000000..0cdbb4062caec
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q0-temporal-filter.sql
@@ -0,0 +1,7 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q0_temporal_filter
+AS
+SELECT auction, bidder, price, date_time
+FROM bid_filtered
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q0.sql b/ci/scripts/sql/nexmark/q0.sql
index c07afe673e834..1b90c9391f37c 100644
--- a/ci/scripts/sql/nexmark/q0.sql
+++ b/ci/scripts/sql/nexmark/q0.sql
@@ -4,4 +4,4 @@ CREATE SINK nexmark_q0
AS
SELECT auction, bidder, price, date_time
FROM bid
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q1-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q1-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..368b0be99ddf3
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q1-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q1_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q1-temporal-filter.sql b/ci/scripts/sql/nexmark/q1-temporal-filter.sql
new file mode 100644
index 0000000000000..20594f6a15347
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q1-temporal-filter.sql
@@ -0,0 +1,10 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q1_temporal_filter
+AS
+SELECT auction,
+ bidder,
+ 0.908 * price as price,
+ date_time
+FROM bid_filtered
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q1.sql b/ci/scripts/sql/nexmark/q1.sql
index 4e38b643928dd..503485046696a 100644
--- a/ci/scripts/sql/nexmark/q1.sql
+++ b/ci/scripts/sql/nexmark/q1.sql
@@ -7,4 +7,4 @@ SELECT auction,
0.908 * price as price,
date_time
FROM bid
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q10-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q10-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..994438f15a6a2
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q10-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q10_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q10-temporal-filter.sql b/ci/scripts/sql/nexmark/q10-temporal-filter.sql
new file mode 100644
index 0000000000000..3914ec575180b
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q10-temporal-filter.sql
@@ -0,0 +1,11 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q10_temporal_filter AS
+SELECT auction,
+ bidder,
+ price,
+ date_time,
+ TO_CHAR(date_time, 'YYYY-MM-DD') as date,
+ TO_CHAR(date_time, 'HH:MI') as time
+FROM bid_filtered
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q10.sql b/ci/scripts/sql/nexmark/q10.sql
index e20a590ecf108..e155dce0a62b3 100644
--- a/ci/scripts/sql/nexmark/q10.sql
+++ b/ci/scripts/sql/nexmark/q10.sql
@@ -8,4 +8,4 @@ SELECT auction,
TO_CHAR(date_time, 'YYYY-MM-DD') as date,
TO_CHAR(date_time, 'HH:MI') as time
FROM bid
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q101-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q101-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..bac9715316da1
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q101-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q101_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q101-temporal-filter.sql b/ci/scripts/sql/nexmark/q101-temporal-filter.sql
new file mode 100644
index 0000000000000..0f7bb7c048220
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q101-temporal-filter.sql
@@ -0,0 +1,16 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q101_temporal_filter AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name,
+ b.max_price AS current_highest_bid
+FROM auction a
+LEFT OUTER JOIN (
+ SELECT
+ b1.auction,
+ MAX(b1.price) max_price
+ FROM bid_filtered b1
+ GROUP BY b1.auction
+) b ON a.id = b.auction
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q102-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q102-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..748a1d388600d
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q102-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q102_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q102-temporal-filter.sql b/ci/scripts/sql/nexmark/q102-temporal-filter.sql
new file mode 100644
index 0000000000000..d517aacc1c383
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q102-temporal-filter.sql
@@ -0,0 +1,14 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q102_temporal_filter AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name,
+ COUNT(b.auction) AS bid_count
+FROM auction a
+JOIN bid_filtered b ON a.id = b.auction
+GROUP BY a.id, a.item_name
+HAVING COUNT(b.auction) >= (
+ SELECT COUNT(*) / COUNT(DISTINCT auction) FROM bid_filtered
+)
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q103-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q103-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..ebae2ca09a33e
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q103-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q103_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q103-temporal-filter.sql b/ci/scripts/sql/nexmark/q103-temporal-filter.sql
new file mode 100644
index 0000000000000..e415e914b9720
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q103-temporal-filter.sql
@@ -0,0 +1,13 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q103_temporal_filter AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name
+FROM auction a
+WHERE a.id IN (
+ SELECT b.auction FROM bid_filtered b
+ GROUP BY b.auction
+ HAVING COUNT(*) >= 20
+)
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q104-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q104-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..f2a066f42c770
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q104-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q104_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q104-temporal-filter.sql b/ci/scripts/sql/nexmark/q104-temporal-filter.sql
new file mode 100644
index 0000000000000..6c6145a03f2cc
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q104-temporal-filter.sql
@@ -0,0 +1,13 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q104_temporal_filter AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name
+FROM auction a
+WHERE a.id NOT IN (
+ SELECT b.auction FROM bid_filtered b
+ GROUP BY b.auction
+ HAVING COUNT(*) < 20
+)
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q105-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q105-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..e3ebe8ecc61f5
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q105-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q105_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q105-temporal-filter.sql b/ci/scripts/sql/nexmark/q105-temporal-filter.sql
new file mode 100644
index 0000000000000..8862ecfbc33dc
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q105-temporal-filter.sql
@@ -0,0 +1,13 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q105_temporal_filter AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name,
+ COUNT(b.auction) AS bid_count
+FROM auction a
+JOIN bid_filtered b ON a.id = b.auction
+GROUP BY a.id, a.item_name
+ORDER BY bid_count DESC
+LIMIT 1000
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q105-without-limit-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q105-without-limit-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..aff265cb37c48
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q105-without-limit-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q105_without_limit_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q105-without-limit-temporal-filter.sql b/ci/scripts/sql/nexmark/q105-without-limit-temporal-filter.sql
new file mode 100644
index 0000000000000..d79a78db866e1
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q105-without-limit-temporal-filter.sql
@@ -0,0 +1,11 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q105_without_limit_temporal_filter AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name,
+ COUNT(b.auction) AS bid_count
+FROM auction a
+JOIN bid_filtered b ON a.id = b.auction
+GROUP BY a.id, a.item_name
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q105-without-limit.drop.sql b/ci/scripts/sql/nexmark/q105-without-limit.drop.sql
new file mode 100644
index 0000000000000..b43a6c4b1eedf
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q105-without-limit.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q105_without_limit;
diff --git a/ci/scripts/sql/nexmark/q105-without-limit.sql b/ci/scripts/sql/nexmark/q105-without-limit.sql
new file mode 100644
index 0000000000000..ea68188ab86e8
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q105-without-limit.sql
@@ -0,0 +1,11 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q105_without_limit AS
+SELECT
+ a.id AS auction_id,
+ a.item_name AS auction_item_name,
+ COUNT(b.auction) AS bid_count
+FROM auction a
+JOIN bid b ON a.id = b.auction
+GROUP BY a.id, a.item_name
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q106.drop.sql b/ci/scripts/sql/nexmark/q106.drop.sql
new file mode 100644
index 0000000000000..f651e37253804
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q106.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q106;
diff --git a/ci/scripts/sql/nexmark/q106.sql b/ci/scripts/sql/nexmark/q106.sql
new file mode 100644
index 0000000000000..79c22f9345876
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q106.sql
@@ -0,0 +1,21 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q106
+AS
+SELECT
+ MIN(final) AS min_final
+FROM
+ (
+ SELECT
+ auction.id,
+ MAX(price) AS final
+ FROM
+ auction,
+ bid
+ WHERE
+ bid.auction = auction.id
+ AND bid.date_time BETWEEN auction.date_time AND auction.expires
+ GROUP BY
+ auction.id
+ )
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q107.drop.sql b/ci/scripts/sql/nexmark/q107.drop.sql
new file mode 100644
index 0000000000000..204d81cea0e49
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q107.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q107;
diff --git a/ci/scripts/sql/nexmark/q107.sql b/ci/scripts/sql/nexmark/q107.sql
new file mode 100644
index 0000000000000..f1d354fcf98a7
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q107.sql
@@ -0,0 +1,11 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q107 AS
+SELECT
+ approx_percentile(0.01, 0.01) within group (order by price) as p01,
+ approx_percentile(0.1, 0.01) within group (order by price) as p10,
+ approx_percentile(0.5, 0.01) within group (order by price) as p50,
+ approx_percentile(0.9, 0.01) within group (order by price) as p90,
+ approx_percentile(0.99, 0.01) within group (order by price) as p99
+FROM bid
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q108.drop.sql b/ci/scripts/sql/nexmark/q108.drop.sql
new file mode 100644
index 0000000000000..4f47359cd7c78
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q108.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q108;
diff --git a/ci/scripts/sql/nexmark/q108.sql b/ci/scripts/sql/nexmark/q108.sql
new file mode 100644
index 0000000000000..4b471d0b78548
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q108.sql
@@ -0,0 +1,13 @@
+-- test two-phase simple approx percentile and merge
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q108 AS
+SELECT
+ approx_percentile(0.01, 0.01) within group (order by price) as p01,
+ approx_percentile(0.1, 0.01) within group (order by price) as p10,
+ approx_percentile(0.5, 0.01) within group (order by price) as p50,
+ approx_percentile(0.9, 0.01) within group (order by price) as p90,
+ approx_percentile(0.99, 0.01) within group (order by price) as p99
+FROM bid
+GROUP BY auction
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q12-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q12-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..5b0a91296c748
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q12-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q12_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q12-temporal-filter.sql b/ci/scripts/sql/nexmark/q12-temporal-filter.sql
new file mode 100644
index 0000000000000..d4874b913fe26
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q12-temporal-filter.sql
@@ -0,0 +1,7 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q12_temporal_filter AS
+SELECT bidder, count(*) as bid_count, window_start, window_end
+FROM TUMBLE(bid_filtered, p_time, INTERVAL '10' SECOND)
+GROUP BY bidder, window_start, window_end
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q13-by-row-id.drop.sql b/ci/scripts/sql/nexmark/q13-by-row-id.drop.sql
new file mode 100644
index 0000000000000..1349e9974365d
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q13-by-row-id.drop.sql
@@ -0,0 +1,4 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q13_by_row_id;
+DROP TABLE side_input;
diff --git a/ci/scripts/sql/nexmark/q13-by-row-id.sql b/ci/scripts/sql/nexmark/q13-by-row-id.sql
new file mode 100644
index 0000000000000..8baf39eda4651
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q13-by-row-id.sql
@@ -0,0 +1,12 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE TABLE side_input(
+ key BIGINT PRIMARY KEY,
+ value VARCHAR
+);
+INSERT INTO side_input SELECT v, v::varchar FROM generate_series(0, ${BENCHMARK_NEXMARK_RISINGWAVE_Q13_SIDE_INPUT_ROW_COUNT} - 1) AS s(v);
+
+CREATE SINK nexmark_q13_by_row_id AS
+SELECT B.auction, B.bidder, B.price, B.date_time, S.value
+FROM bid B join side_input FOR SYSTEM_TIME AS OF PROCTIME() S on mod(B._row_id::bigint, ${BENCHMARK_NEXMARK_RISINGWAVE_Q13_SIDE_INPUT_ROW_COUNT}) = S.key
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q13-non-lookup-cond.drop.sql b/ci/scripts/sql/nexmark/q13-non-lookup-cond.drop.sql
new file mode 100644
index 0000000000000..f950bf5da887a
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q13-non-lookup-cond.drop.sql
@@ -0,0 +1,4 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q13_non_lookup_cond;
+DROP TABLE side_input;
diff --git a/ci/scripts/sql/nexmark/q13-non-lookup-cond.sql b/ci/scripts/sql/nexmark/q13-non-lookup-cond.sql
new file mode 100644
index 0000000000000..4162b8ed7bea8
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q13-non-lookup-cond.sql
@@ -0,0 +1,14 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE TABLE side_input(
+ key BIGINT PRIMARY KEY,
+ value VARCHAR
+);
+INSERT INTO side_input SELECT v, v::varchar FROM generate_series(0, ${BENCHMARK_NEXMARK_RISINGWAVE_Q13_SIDE_INPUT_ROW_COUNT} - 1) AS s(v);
+
+CREATE SINK nexmark_q13_non_lookup_cond AS
+SELECT B.auction, B.bidder, B.price, B.date_time, S.value
+FROM bid B join side_input FOR SYSTEM_TIME AS OF PROCTIME() S
+ON mod(B.auction, ${BENCHMARK_NEXMARK_RISINGWAVE_Q13_SIDE_INPUT_ROW_COUNT}) = S.key
+ AND S.key % 4 != 1
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q13.sql b/ci/scripts/sql/nexmark/q13.sql
index d409de228ade5..2aee5ffab4f4a 100644
--- a/ci/scripts/sql/nexmark/q13.sql
+++ b/ci/scripts/sql/nexmark/q13.sql
@@ -1,11 +1,12 @@
-- noinspection SqlNoDataSourceInspectionForFile
-- noinspection SqlResolveForFile
CREATE TABLE side_input(
- key BIGINT PRIMARY KEY,
- value VARCHAR
+ key BIGINT PRIMARY KEY,
+ value VARCHAR
);
-INSERT INTO side_input SELECT i::bigint, i::varchar FROM(SELECT generate_series(0,9999,1) as i);
+INSERT INTO side_input SELECT v, v::varchar FROM generate_series(0, ${BENCHMARK_NEXMARK_RISINGWAVE_Q13_SIDE_INPUT_ROW_COUNT} - 1) AS s(v);
+
CREATE SINK nexmark_q13 AS
SELECT B.auction, B.bidder, B.price, B.date_time, S.value
-FROM bid B join side_input FOR SYSTEM_TIME AS OF PROCTIME() S on mod(B.auction, 10000) = S.key
-WITH ( connector = 'blackhole', type = 'append-only');
+FROM bid B join side_input FOR SYSTEM_TIME AS OF PROCTIME() S on mod(B.auction, ${BENCHMARK_NEXMARK_RISINGWAVE_Q13_SIDE_INPUT_ROW_COUNT}) = S.key
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q14-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q14-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..059ef57dee668
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q14-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q14_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q14-temporal-filter.sql b/ci/scripts/sql/nexmark/q14-temporal-filter.sql
new file mode 100644
index 0000000000000..d0aba0a8d5c55
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q14-temporal-filter.sql
@@ -0,0 +1,26 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q14_temporal_filter AS
+SELECT auction,
+ bidder,
+ 0.908 * price as price,
+ CASE
+ WHEN
+ extract(hour from date_time) >= 8 AND
+ extract(hour from date_time) <= 18
+ THEN 'dayTime'
+ WHEN
+ extract(hour from date_time) <= 6 OR
+ extract(hour from date_time) >= 20
+ THEN 'nightTime'
+ ELSE 'otherTime'
+ END AS bidTimeType,
+ date_time
+ -- extra
+    -- TODO: count_char is a UDF; add it back when we support similar functionality.
+ -- https://github.com/nexmark/nexmark/blob/master/nexmark-flink/src/main/java/com/github/nexmark/flink/udf/CountChar.java
+ -- count_char(extra, 'c') AS c_counts
+FROM bid_filtered
+WHERE 0.908 * price > 1000000
+ AND 0.908 * price < 50000000
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q14.drop.sql b/ci/scripts/sql/nexmark/q14.drop.sql
index 6cd0e12d317a0..b4f51674fb03b 100644
--- a/ci/scripts/sql/nexmark/q14.drop.sql
+++ b/ci/scripts/sql/nexmark/q14.drop.sql
@@ -1,4 +1,4 @@
-- noinspection SqlNoDataSourceInspectionForFile
-- noinspection SqlResolveForFile
DROP SINK nexmark_q14;
-DROP FUNCTION count_char;
\ No newline at end of file
+DROP FUNCTION count_char;
diff --git a/ci/scripts/sql/nexmark/q14.sql b/ci/scripts/sql/nexmark/q14.sql
index c5c174e3579ca..258f72c1ca7fb 100644
--- a/ci/scripts/sql/nexmark/q14.sql
+++ b/ci/scripts/sql/nexmark/q14.sql
@@ -1,16 +1,10 @@
-- noinspection SqlNoDataSourceInspectionForFile
-- noinspection SqlResolveForFile
-
-CREATE FUNCTION count_char(s varchar, c varchar) RETURNS int LANGUAGE javascript AS $$
- var count = 0;
- for (var cc of s) {
- if (cc === c) {
- count++;
- }
- }
- return count;
+CREATE FUNCTION count_char(s varchar, c varchar) RETURNS int LANGUAGE rust AS $$
+fn count_char(s: &str, c: &str) -> i32 {
+ s.matches(c).count() as i32
+}
$$;
-
CREATE SINK nexmark_q14 AS
SELECT auction,
bidder,
@@ -27,8 +21,9 @@ SELECT auction,
ELSE 'otherTime'
END AS bidTimeType,
date_time,
+ -- extra
count_char(extra, 'c') AS c_counts
FROM bid
WHERE 0.908 * price > 1000000
AND 0.908 * price < 50000000
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q15-no-distinct.drop.sql b/ci/scripts/sql/nexmark/q15-no-distinct.drop.sql
new file mode 100644
index 0000000000000..2a3680882ed28
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q15-no-distinct.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q15_no_distinct;
diff --git a/ci/scripts/sql/nexmark/q15-no-distinct.sql b/ci/scripts/sql/nexmark/q15-no-distinct.sql
new file mode 100644
index 0000000000000..a786285a6b8fd
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q15-no-distinct.sql
@@ -0,0 +1,20 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+SET rw_force_two_phase_agg = ${BENCHMARK_NEXMARK_RISINGWAVE_Q15_NO_DISTINCT_RW_FORCE_TWO_PHASE_AGG};
+CREATE SINK nexmark_q15_no_distinct AS
+SELECT to_char(date_time, 'YYYY-MM-DD') as "day",
+ count(*) AS total_bids,
+ count(*) filter (where price < 10000) AS rank1_bids,
+ count(*) filter (where price >= 10000 and price < 1000000) AS rank2_bids,
+ count(*) filter (where price >= 1000000) AS rank3_bids,
+ count(bidder) AS total_bidders,
+ count(bidder) filter (where price < 10000) AS rank1_bidders,
+ count(bidder) filter (where price >= 10000 and price < 1000000) AS rank2_bidders,
+ count(bidder) filter (where price >= 1000000) AS rank3_bidders,
+ count(auction) AS total_auctions,
+ count(auction) filter (where price < 10000) AS rank1_auctions,
+ count(auction) filter (where price >= 10000 and price < 1000000) AS rank2_auctions,
+ count(auction) filter (where price >= 1000000) AS rank3_auctions
+FROM bid
+GROUP BY to_char(date_time, 'YYYY-MM-DD')
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q15-simple.drop.sql b/ci/scripts/sql/nexmark/q15-simple.drop.sql
new file mode 100644
index 0000000000000..74ba29061ab05
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q15-simple.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q15_simple;
diff --git a/ci/scripts/sql/nexmark/q15-simple.sql b/ci/scripts/sql/nexmark/q15-simple.sql
new file mode 100644
index 0000000000000..d6e9b7e279207
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q15-simple.sql
@@ -0,0 +1,8 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q15_simple AS
+SELECT to_char(date_time, 'YYYY-MM-DD') as "day",
+ count(*) AS total_bids
+FROM bid
+GROUP BY to_char(date_time, 'YYYY-MM-DD')
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q15.sql b/ci/scripts/sql/nexmark/q15.sql
index cdee0bf20b83e..49c5b05e52457 100644
--- a/ci/scripts/sql/nexmark/q15.sql
+++ b/ci/scripts/sql/nexmark/q15.sql
@@ -1,5 +1,7 @@
-- noinspection SqlNoDataSourceInspectionForFile
-- noinspection SqlResolveForFile
+SET rw_force_split_distinct_agg = ${BENCHMARK_NEXMARK_RISINGWAVE_Q15_RW_FORCE_SPLIT_DISTINCT_AGG};
+SET rw_force_two_phase_agg = ${BENCHMARK_NEXMARK_RISINGWAVE_Q15_RW_FORCE_TWO_PHASE_AGG};
CREATE SINK nexmark_q15 AS
SELECT to_char(date_time, 'YYYY-MM-DD') as "day",
count(*) AS total_bids,
diff --git a/ci/scripts/sql/nexmark/q16-no-distinct.drop.sql b/ci/scripts/sql/nexmark/q16-no-distinct.drop.sql
new file mode 100644
index 0000000000000..8fc59bf972fa1
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q16-no-distinct.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q16_no_distinct;
diff --git a/ci/scripts/sql/nexmark/q16-no-distinct.sql b/ci/scripts/sql/nexmark/q16-no-distinct.sql
new file mode 100644
index 0000000000000..b1148fa66a43c
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q16-no-distinct.sql
@@ -0,0 +1,22 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+SET rw_force_two_phase_agg = ${BENCHMARK_NEXMARK_RISINGWAVE_Q16_NO_DISTINCT_RW_FORCE_TWO_PHASE_AGG};
+CREATE SINK nexmark_q16_no_distinct AS
+SELECT channel,
+ to_char(date_time, 'YYYY-MM-DD') as "day",
+ max(to_char(date_time, 'HH:mm')) as "minute",
+ count(*) AS total_bids,
+ count(*) filter (where price < 10000) AS rank1_bids,
+ count(*) filter (where price >= 10000 and price < 1000000) AS rank2_bids,
+ count(*) filter (where price >= 1000000) AS rank3_bids,
+ count(bidder) AS total_bidders,
+ count(bidder) filter (where price < 10000) AS rank1_bidders,
+ count(bidder) filter (where price >= 10000 and price < 1000000) AS rank2_bidders,
+ count(bidder) filter (where price >= 1000000) AS rank3_bidders,
+ count(auction) AS total_auctions,
+ count(auction) filter (where price < 10000) AS rank1_auctions,
+ count(auction) filter (where price >= 10000 and price < 1000000) AS rank2_auctions,
+ count(auction) filter (where price >= 1000000) AS rank3_auctions
+FROM bid
+GROUP BY to_char(date_time, 'YYYY-MM-DD'), channel
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q16.sql b/ci/scripts/sql/nexmark/q16.sql
index 1192a1f8af805..a6dcff0fb2316 100644
--- a/ci/scripts/sql/nexmark/q16.sql
+++ b/ci/scripts/sql/nexmark/q16.sql
@@ -1,5 +1,7 @@
-- noinspection SqlNoDataSourceInspectionForFile
-- noinspection SqlResolveForFile
+SET rw_force_split_distinct_agg = ${BENCHMARK_NEXMARK_RISINGWAVE_Q16_RW_FORCE_SPLIT_DISTINCT_AGG};
+SET rw_force_two_phase_agg = ${BENCHMARK_NEXMARK_RISINGWAVE_Q16_RW_FORCE_TWO_PHASE_AGG};
CREATE SINK nexmark_q16 AS
SELECT channel,
to_char(date_time, 'YYYY-MM-DD') as "day",
diff --git a/ci/scripts/sql/nexmark/q18-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q18-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..6848636f73976
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q18-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q18_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q18-temporal-filter.sql b/ci/scripts/sql/nexmark/q18-temporal-filter.sql
new file mode 100644
index 0000000000000..c1aaec28c5e57
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q18-temporal-filter.sql
@@ -0,0 +1,12 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q18_temporal_filter AS
+SELECT auction, bidder, price, channel, url, date_time
+FROM (SELECT *,
+ ROW_NUMBER() OVER (
+ PARTITION BY bidder, auction
+ ORDER BY date_time DESC
+ ) AS rank_number
+ FROM bid_filtered)
+WHERE rank_number <= 1
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q19-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q19-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..4344479feeaea
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q19-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q19_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q19-temporal-filter.sql b/ci/scripts/sql/nexmark/q19-temporal-filter.sql
new file mode 100644
index 0000000000000..67a2e503839db
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q19-temporal-filter.sql
@@ -0,0 +1,12 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q19_temporal_filter AS
+SELECT *
+FROM (SELECT *,
+ ROW_NUMBER() OVER (
+ PARTITION BY auction
+ ORDER BY price DESC
+ ) AS rank_number
+ FROM bid_filtered)
+WHERE rank_number <= 10
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q2.sql b/ci/scripts/sql/nexmark/q2.sql
index b33480bbfe020..33a1d0d6bcb17 100644
--- a/ci/scripts/sql/nexmark/q2.sql
+++ b/ci/scripts/sql/nexmark/q2.sql
@@ -9,4 +9,4 @@ WHERE auction = 1007
OR auction = 2001
OR auction = 2019
OR auction = 2087
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q20-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q20-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..b0a0a8827b4cf
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q20-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q20_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q20-temporal-filter.sql b/ci/scripts/sql/nexmark/q20-temporal-filter.sql
new file mode 100644
index 0000000000000..095d33f05f5d6
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q20-temporal-filter.sql
@@ -0,0 +1,23 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q20_temporal_filter AS
+SELECT auction,
+ bidder,
+ price,
+ channel,
+ url,
+ B.date_time as bid_date_time,
+ B.extra as bid_extra,
+ item_name,
+ description,
+ initial_bid,
+ reserve,
+ A.date_time as auction_date_time,
+ expires,
+ seller,
+ category,
+ A.extra as auction_extra
+FROM bid_filtered AS B
+ INNER JOIN auction AS A on B.auction = A.id
+WHERE A.category = 10
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q20.sql b/ci/scripts/sql/nexmark/q20.sql
index b3d46d8bae721..3f353cad0d123 100644
--- a/ci/scripts/sql/nexmark/q20.sql
+++ b/ci/scripts/sql/nexmark/q20.sql
@@ -20,4 +20,4 @@ SELECT auction,
FROM bid AS B
INNER JOIN auction AS A on B.auction = A.id
WHERE A.category = 10
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q21-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q21-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..637eefee2a4b1
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q21-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q21_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q21-temporal-filter.sql b/ci/scripts/sql/nexmark/q21-temporal-filter.sql
new file mode 100644
index 0000000000000..ec64fc4242910
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q21-temporal-filter.sql
@@ -0,0 +1,19 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q21_temporal_filter AS
+SELECT auction,
+ bidder,
+ price,
+ channel,
+ CASE
+ WHEN LOWER(channel) = 'apple' THEN '0'
+ WHEN LOWER(channel) = 'google' THEN '1'
+ WHEN LOWER(channel) = 'facebook' THEN '2'
+ WHEN LOWER(channel) = 'baidu' THEN '3'
+ ELSE (regexp_match(url, '(&|^)channel_id=([^&]*)'))[2]
+ END
+ AS channel_id
+FROM bid_filtered
+WHERE (regexp_match(url, '(&|^)channel_id=([^&]*)'))[2] is not null
+ or LOWER(channel) in ('apple', 'google', 'facebook', 'baidu')
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q21.sql b/ci/scripts/sql/nexmark/q21.sql
index 57c48d55451fa..75322978d0b78 100644
--- a/ci/scripts/sql/nexmark/q21.sql
+++ b/ci/scripts/sql/nexmark/q21.sql
@@ -16,4 +16,4 @@ SELECT auction,
FROM bid
WHERE (regexp_match(url, '(&|^)channel_id=([^&]*)'))[2] is not null
or LOWER(channel) in ('apple', 'google', 'facebook', 'baidu')
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q22-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q22-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..7539ad8e25404
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q22-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q22_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q22-temporal-filter.sql b/ci/scripts/sql/nexmark/q22-temporal-filter.sql
new file mode 100644
index 0000000000000..40c47c6e236fd
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q22-temporal-filter.sql
@@ -0,0 +1,12 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q22_temporal_filter AS
+SELECT auction,
+ bidder,
+ price,
+ channel,
+ split_part(url, '/', 4) as dir1,
+ split_part(url, '/', 5) as dir2,
+ split_part(url, '/', 6) as dir3
+FROM bid_filtered
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q22.sql b/ci/scripts/sql/nexmark/q22.sql
index db0ece23786b5..81f861e65b7c6 100644
--- a/ci/scripts/sql/nexmark/q22.sql
+++ b/ci/scripts/sql/nexmark/q22.sql
@@ -9,4 +9,4 @@ SELECT auction,
split_part(url, '/', 5) as dir2,
split_part(url, '/', 6) as dir3
FROM bid
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q3.sql b/ci/scripts/sql/nexmark/q3.sql
index c12f96705059c..9d5e0cd62dc02 100644
--- a/ci/scripts/sql/nexmark/q3.sql
+++ b/ci/scripts/sql/nexmark/q3.sql
@@ -10,4 +10,4 @@ FROM auction AS A
INNER JOIN person AS P on A.seller = P.id
WHERE A.category = 10
and (P.state = 'or' OR P.state = 'id' OR P.state = 'ca')
-WITH ( connector = 'blackhole', type = 'append-only');
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q4-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q4-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..6e1e0a2effd36
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q4-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q4_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q4-temporal-filter.sql b/ci/scripts/sql/nexmark/q4-temporal-filter.sql
new file mode 100644
index 0000000000000..ba3300eb72678
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q4-temporal-filter.sql
@@ -0,0 +1,15 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q4_temporal_filter
+AS
+SELECT Q.category,
+ AVG(Q.final) as avg
+FROM (SELECT MAX(B.price) AS final,
+ A.category
+ FROM auction A,
+ bid_filtered B
+ WHERE A.id = B.auction
+ AND B.date_time BETWEEN A.date_time AND A.expires
+ GROUP BY A.id, A.category) Q
+GROUP BY Q.category
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q5-many-windows-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q5-many-windows-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..0642187cd5eed
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q5-many-windows-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q5_many_windows_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q5-many-windows-temporal-filter.sql b/ci/scripts/sql/nexmark/q5-many-windows-temporal-filter.sql
new file mode 100644
index 0000000000000..50a9c7a20c2c0
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q5-many-windows-temporal-filter.sql
@@ -0,0 +1,43 @@
+-- https://web.archive.org/web/20100620010601/http://datalab.cs.pdx.edu/niagaraST/NEXMark/
+-- The original q5 is `[RANGE 60 MINUTE SLIDE 1 MINUTE]`.
+-- However, a 60-minute window would require running for a very long time to see the effect.
+-- Therefore, we change it to `[RANGE 5 MINUTE SLIDE 5 SECOND]` to generate many sliding windows.
+-- The ratio between window size and hop interval (60 min / 1 min = 5 min / 5 s = 60) stays the same as in the original Nexmark.
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q5_many_windows_temporal_filter
+AS
+SELECT
+ AuctionBids.auction, AuctionBids.num
+FROM (
+ SELECT
+ bid.auction,
+ count(*) AS num,
+ window_start AS starttime
+ FROM
+ HOP(bid_filtered, date_time, INTERVAL '5' SECOND, INTERVAL '5' MINUTE) as bid
+ GROUP BY
+ bid.auction,
+ window_start
+) AS AuctionBids
+JOIN (
+ SELECT
+ max(CountBids.num) AS maxn,
+ CountBids.starttime_c
+ FROM (
+ SELECT
+ count(*) AS num,
+ window_start AS starttime_c
+ FROM
+ HOP(bid_filtered, date_time, INTERVAL '5' SECOND, INTERVAL '5' MINUTE) as bid
+ GROUP BY
+ bid.auction,
+ window_start
+ ) AS CountBids
+ GROUP BY
+ CountBids.starttime_c
+ ) AS MaxBids
+ON
+ AuctionBids.starttime = MaxBids.starttime_c AND
+ AuctionBids.num >= MaxBids.maxn
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q5-many-windows.sql b/ci/scripts/sql/nexmark/q5-many-windows.sql
index 1a0daafd4176d..6fd49d2edf682 100644
--- a/ci/scripts/sql/nexmark/q5-many-windows.sql
+++ b/ci/scripts/sql/nexmark/q5-many-windows.sql
@@ -24,19 +24,19 @@ JOIN (
SELECT
max(CountBids.num) AS maxn,
CountBids.starttime_c
- FROM (
- SELECT
+ FROM (
+ SELECT
count(*) AS num,
window_start AS starttime_c
- FROM
+ FROM
HOP(bid, date_time, INTERVAL '5' SECOND, INTERVAL '5' MINUTE)
GROUP BY
bid.auction,
window_start
- ) AS CountBids
- GROUP BY
+ ) AS CountBids
+ GROUP BY
CountBids.starttime_c
- ) AS MaxBids
+ ) AS MaxBids
ON
AuctionBids.starttime = MaxBids.starttime_c AND
AuctionBids.num >= MaxBids.maxn
diff --git a/ci/scripts/sql/nexmark/q5-rewrite-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q5-rewrite-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..a13d94658f2d1
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q5-rewrite-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q5_rewrite_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q5-rewrite-temporal-filter.sql b/ci/scripts/sql/nexmark/q5-rewrite-temporal-filter.sql
new file mode 100644
index 0000000000000..2adb38d99b0d0
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q5-rewrite-temporal-filter.sql
@@ -0,0 +1,20 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q5_rewrite_temporal_filter AS
+SELECT
+ B.auction,
+ B.num
+FROM (
+ SELECT
+ auction,
+ num,
+ /*use rank here to express top-N with ties*/
+ rank() over (partition by starttime order by num desc) as num_rank
+ FROM (
+ SELECT bid.auction, count(*) AS num, window_start AS starttime
+ FROM HOP(bid_filtered, date_time, INTERVAL '2' SECOND, INTERVAL '10' SECOND) as bid
+ GROUP BY window_start, bid.auction
+ )
+) B
+WHERE num_rank <= 1
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q5-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q5-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..14375f59190af
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q5-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q5_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q5-temporal-filter.sql b/ci/scripts/sql/nexmark/q5-temporal-filter.sql
new file mode 100644
index 0000000000000..db19bf6988ef7
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q5-temporal-filter.sql
@@ -0,0 +1,38 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q5_temporal_filter
+AS
+SELECT
+ AuctionBids.auction, AuctionBids.num
+FROM (
+ SELECT
+ bid.auction,
+ count(*) AS num,
+ window_start AS starttime
+ FROM
+ HOP(bid_filtered, date_time, INTERVAL '2' SECOND, INTERVAL '10' SECOND) as bid
+ GROUP BY
+ bid.auction,
+ window_start
+) AS AuctionBids
+JOIN (
+ SELECT
+ max(CountBids.num) AS maxn,
+ CountBids.starttime_c
+ FROM (
+ SELECT
+ count(*) AS num,
+ window_start AS starttime_c
+ FROM
+ HOP(bid_filtered, date_time, INTERVAL '2' SECOND, INTERVAL '10' SECOND) as bid
+ GROUP BY
+ bid.auction,
+ window_start
+ ) AS CountBids
+ GROUP BY
+ CountBids.starttime_c
+ ) AS MaxBids
+ON
+ AuctionBids.starttime = MaxBids.starttime_c AND
+ AuctionBids.num >= MaxBids.maxn
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q6-group-top1-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q6-group-top1-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..22b1ecc2a1424
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q6-group-top1-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q6_group_top1_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q6-group-top1-temporal-filter.sql b/ci/scripts/sql/nexmark/q6-group-top1-temporal-filter.sql
new file mode 100644
index 0000000000000..93c8edd37b903
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q6-group-top1-temporal-filter.sql
@@ -0,0 +1,16 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q6_group_top1_temporal_filter
+AS
+SELECT
+ Q.seller,
+ AVG(Q.final) OVER
+ (PARTITION BY Q.seller ORDER BY Q.date_time ROWS BETWEEN 10 PRECEDING AND CURRENT ROW)
+ as avg
+FROM (
+ SELECT ROW_NUMBER() OVER (PARTITION BY A.id, A.seller ORDER BY B.price) as rank, A.seller, B.price as final, B.date_time
+ FROM auction AS A, bid_filtered AS B
+ WHERE A.id = B.auction and B.date_time between A.date_time and A.expires
+) AS Q
+WHERE Q.rank <= 1
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q7-rewrite-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q7-rewrite-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..9061af9b68af9
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q7-rewrite-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q7_rewrite_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q7-rewrite-temporal-filter.sql b/ci/scripts/sql/nexmark/q7-rewrite-temporal-filter.sql
new file mode 100644
index 0000000000000..80066a8f29a8e
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q7-rewrite-temporal-filter.sql
@@ -0,0 +1,21 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q7_rewrite_temporal_filter AS
+SELECT
+ B.auction,
+ B.price,
+ B.bidder,
+ B.date_time
+FROM (
+ SELECT
+ auction,
+ price,
+ bidder,
+ date_time,
+ /*use rank here to express top-N with ties*/
+ rank() over (partition by window_end order by price desc) as price_rank
+ FROM
+ TUMBLE(bid_filtered, date_time, INTERVAL '10' SECOND)
+) B
+WHERE price_rank <= 1
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q7-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q7-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..217c0e3d41d5c
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q7-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q7_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q7-temporal-filter.sql b/ci/scripts/sql/nexmark/q7-temporal-filter.sql
new file mode 100644
index 0000000000000..e4e3dbeed3561
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q7-temporal-filter.sql
@@ -0,0 +1,17 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q7_temporal_filter
+AS
+SELECT B.auction,
+ B.price,
+ B.bidder,
+ B.date_time
+from bid_filtered B
+ JOIN (SELECT MAX(price) AS maxprice,
+ window_end as date_time
+ FROM
+ TUMBLE(bid_filtered, date_time, INTERVAL '10' SECOND)
+ GROUP BY window_end) B1 ON B.price = B1.maxprice
+WHERE B.date_time BETWEEN B1.date_time - INTERVAL '10' SECOND
+ AND B1.date_time
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/sql/nexmark/q9-temporal-filter.drop.sql b/ci/scripts/sql/nexmark/q9-temporal-filter.drop.sql
new file mode 100644
index 0000000000000..a0684f0dbf44c
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q9-temporal-filter.drop.sql
@@ -0,0 +1,3 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+DROP SINK nexmark_q9_temporal_filter;
diff --git a/ci/scripts/sql/nexmark/q9-temporal-filter.sql b/ci/scripts/sql/nexmark/q9-temporal-filter.sql
new file mode 100644
index 0000000000000..02cfc8997122b
--- /dev/null
+++ b/ci/scripts/sql/nexmark/q9-temporal-filter.sql
@@ -0,0 +1,29 @@
+-- noinspection SqlNoDataSourceInspectionForFile
+-- noinspection SqlResolveForFile
+CREATE SINK nexmark_q9_temporal_filter
+AS
+SELECT id,
+ item_name,
+ description,
+ initial_bid,
+ reserve,
+ date_time,
+ expires,
+ seller,
+ category,
+ auction,
+ bidder,
+ price,
+ bid_date_time
+FROM (SELECT A.*,
+ B.auction,
+ B.bidder,
+ B.price,
+ B.date_time AS bid_date_time,
+ ROW_NUMBER() OVER (PARTITION BY A.id ORDER BY B.price DESC, B.date_time ASC) AS rownum
+ FROM auction A,
+ bid_filtered B
+ WHERE A.id = B.auction
+ AND B.date_time BETWEEN A.date_time AND A.expires) tmp
+WHERE rownum <= 1
+WITH ( connector = 'blackhole', type = 'append-only', force_append_only = 'true');
diff --git a/ci/scripts/standalone-utils.sh b/ci/scripts/standalone-utils.sh
index c0767ddaefb62..059d11aada83f 100755
--- a/ci/scripts/standalone-utils.sh
+++ b/ci/scripts/standalone-utils.sh
@@ -75,7 +75,7 @@ start_standalone() {
}
stop_standalone() {
- pkill standalone
+ killall --wait standalone
}
wait_standalone() {
@@ -101,7 +101,6 @@ wait_standalone() {
restart_standalone() {
stop_standalone
- sleep 5
start_standalone "$PREFIX_LOG"/standalone-restarted.log &
wait_standalone
}
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index c1681ff658765..bce57e69147f4 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -60,6 +60,7 @@ services:
ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true}
RW_TELEMETRY_TYPE: ${RW_TELEMETRY_TYPE:-"docker-compose"}
RW_SECRET_STORE_PRIVATE_KEY_HEX: ${RW_SECRET_STORE_PRIVATE_KEY_HEX:-0123456789abcdef}
+ RW_LICENSE_KEY: ${RW_LICENSE_KEY:-""}
container_name: risingwave-standalone
healthcheck:
test:
diff --git a/docs/dev/book.toml b/docs/dev/book.toml
index 77608409452b0..3479e428351a5 100644
--- a/docs/dev/book.toml
+++ b/docs/dev/book.toml
@@ -11,5 +11,6 @@ smart-punctuation = true
git-repository-url = "https://github.com/risingwavelabs/risingwave/tree/main/docs/dev/src"
edit-url-template = "https://github.com/risingwavelabs/risingwave/edit/main/docs/dev/{path}"
search.use-boolean-and = true
+mathjax-support = true
[output.linkcheck]
diff --git a/docs/dev/src/design/consistent-hash.md b/docs/dev/src/design/consistent-hash.md
index 43c3b973c04bc..551b7f4c9633b 100644
--- a/docs/dev/src/design/consistent-hash.md
+++ b/docs/dev/src/design/consistent-hash.md
@@ -20,23 +20,23 @@ First, we need to introduce a little about how we schedule the actors. Each work
Here comes the main part, where we will construct a mapping that determines data distribution.
-For all data $k \in U_k$, where $U_k$ is an unbounded set, we apply a hash function $v = H(k)$, where $v$ falls to a limited range. The hash function $H$ ensures that all data are hashed **uniformly** to that range. We call $v$ vnode, namely virtual node, as is shown as the squares in the figure below.
+For all data \\( k \in U_k \\), where \\( U_k \\) is an unbounded set, we apply a hash function \\( v = H(k) \\), where \\( v \\) falls into a limited range. The hash function \\( H \\) ensures that all data are hashed **uniformly** to that range. We call \\( v \\) a vnode (virtual node), shown as the squares in the figure below.
![initial data distribution](../images/consistent-hash/data-distribution.svg)
-Then we have vnode mapping, which ensures that vnodes are mapped evenly to parallel units in the cluster. In other words, the number of vnodes that are mapped to each parallel unit should be as close as possible. This is denoted by different colors in the figure above. As is depicted, we have 3 parallel units (shown as circles), each taking $\frac{1}{3}$ of total vnodes. Vnode mapping is [constructed and maintained by meta](https://github.com/risingwavelabs/risingwave/blob/main/src/meta/src/stream/scheduler.rs).
+Then we have vnode mapping, which ensures that vnodes are mapped evenly to parallel units in the cluster. In other words, the number of vnodes that are mapped to each parallel unit should be as close as possible. This is denoted by different colors in the figure above. As is depicted, we have 3 parallel units (shown as circles), each taking \\( \frac{1}{3} \\) of total vnodes. Vnode mapping is [constructed and maintained by meta](https://github.com/risingwavelabs/risingwave/blob/main/src/meta/src/stream/scheduler.rs).
-As long as the hash function $H$ could ensure uniformity, the data distribution determined by this strategy would be even across physical resources. The evenness will be retained even if data in $U_k$ are skewed to a certain range, say, most students scoring over 60 in a hundred-mark system.
+As long as the hash function \\( H \\) ensures uniformity, the data distribution determined by this strategy will be even across physical resources. The evenness is retained even if data in \\( U_k \\) are skewed to a certain range, say, most students scoring over 60 in a hundred-mark system.
#### Data Redistribution
-Since $v = H(k)$, the way that data are mapped to vnodes will be invariant. Therefore, when scaling occurs, we only need to modify vnode mapping (the way that vnodes are mapped to parallel units), so as to redistribute the data.
+Since \\( v = H(k) \\), the way that data are mapped to vnodes will be invariant. Therefore, when scaling occurs, we only need to modify vnode mapping (the way that vnodes are mapped to parallel units), so as to redistribute the data.
-Let's take scaling out for example. Assume that we have one more parallel unit after scaling out, as is depicted as the orange circle in the figure below. Using the optimal strategy, we modify the vnode mapping in such a way that only $\frac{1}{4}$ of the data have to be moved, as is shown in the figure below. The vnodes whose data are required to be moved are highlighted with bold border in the figure.
+Let's take scaling out for example. Assume that we have one more parallel unit after scaling out, as is depicted as the orange circle in the figure below. Using the optimal strategy, we modify the vnode mapping in such a way that only \\( \frac{1}{4} \\) of the data have to be moved, as is shown in the figure below. The vnodes whose data are required to be moved are highlighted with bold border in the figure.
![optimal data redistribution](../images/consistent-hash/data-redistribution-1.svg)
-To minimize data movement when scaling occurs, we should be careful when we modify the vnode mapping. Below is an opposite example. Modifying vnode mapping like this will result in $\frac{1}{2}$ of the data being moved.
+To minimize data movement when scaling occurs, we should be careful when we modify the vnode mapping. Below is an opposite example. Modifying vnode mapping like this will result in \\( \frac{1}{2} \\) of the data being moved.
![worst data redistribution](../images/consistent-hash/data-redistribution-2.svg)
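As a concrete check (assuming, for illustration only, 12 vnodes): going from 3 parallel units (4 vnodes each) to 4 units (3 vnodes each) optimally moves exactly one vnode away from each old unit, i.e. \\( 3/12 = 1/4 \\) of the data, whereas the careless remapping shown above moves \\( 6/12 = 1/2 \\) of it.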
@@ -49,9 +49,9 @@ We know that a fragment has several actors as its different parallelisms, and th
In the figure, we can see that one upstream actor dispatches data to three downstream actors. The downstream actors are scheduled on the parallel units mentioned in the previous example, respectively.
Based on our consistent hash design, the dispatcher is informed of the latest vnode mapping by the meta node. It then decides how to send data with the following steps:
-1. Compute vnode of the data via the hash function $H$. Let the vnode be $v_k$.
-2. Look up vnode mapping and find out parallel unit $p_n$ that vnode $v_k$ maps to.
-3. Send data to the downstream actor that is scheduled on parallel unit $p_n$ (remember that one actor will be scheduled on exactly one parallel unit).
+1. Compute vnode of the data via the hash function \\( H \\). Let the vnode be \\( v_k \\).
+2. Look up vnode mapping and find out parallel unit \\( p_n \\) that vnode \\( v_k \\) maps to.
+3. Send data to the downstream actor that is scheduled on parallel unit \\( p_n \\) (remember that one actor will be scheduled on exactly one parallel unit).
In this way, all actors' data (i.e. actors' states) will be distributed according to the vnode mapping constructed by meta.
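A minimal sketch of the three dispatch steps above, assuming a fixed vnode count and a flat `vnode -> parallel unit` mapping array (the names and the hash function here are illustrative, not RisingWave's actual API):

```rust
const VNODE_COUNT: u64 = 256; // illustrative; the real vnode count is a cluster-wide constant

/// Step 1: hash the key uniformly into the vnode range.
fn vnode_of(key: &[u8]) -> usize {
    let h = key
        .iter()
        .fold(0u64, |acc, b| acc.wrapping_mul(131).wrapping_add(*b as u64));
    (h % VNODE_COUNT) as usize
}

/// Steps 2 and 3: look up the parallel unit that owns this vnode;
/// the dispatcher then sends the row to the actor scheduled on that unit.
fn target_parallel_unit(key: &[u8], vnode_mapping: &[usize]) -> usize {
    debug_assert_eq!(vnode_mapping.len(), VNODE_COUNT as usize);
    vnode_mapping[vnode_of(key)]
}
```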
@@ -78,7 +78,7 @@ We know that [Hummock](./state-store-overview.md#overview), our LSM-Tree-based s
```
table_id | vnode | ...
```
-where `table_id` denotes the [state table](relational-table.md), and `vnode` is computed via $H$ on key of the data.
+where `table_id` denotes the [state table](relational-table.md), and `vnode` is computed via \\( H \\) on the key of the data.
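A minimal sketch of how such a key could be assembled, assuming for illustration a 4-byte `table_id` and a 2-byte `vnode` prefix (widths and names are illustrative, not the actual Hummock encoding):

```rust
/// Illustrative only: prefix the serialized primary key with `table_id` and `vnode`
/// so that all rows owned by one vnode of one state table are contiguous in the key space.
fn storage_key(table_id: u32, vnode: u16, serialized_pk: &[u8]) -> Vec<u8> {
    let mut key = Vec::with_capacity(4 + 2 + serialized_pk.len());
    key.extend_from_slice(&table_id.to_be_bytes());
    key.extend_from_slice(&vnode.to_be_bytes());
    key.extend_from_slice(serialized_pk);
    key
}
```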
To illustrate this, let's revisit the [previous example](#streaming). Executors of an operator will share the same logical state table, just as is shown in the figure below:
diff --git a/docs/dev/src/images/architecture_20240814.png b/docs/dev/src/images/architecture_20240814.png
new file mode 100644
index 0000000000000..9d90e7bd86555
Binary files /dev/null and b/docs/dev/src/images/architecture_20240814.png differ
diff --git a/e2e_test/batch/basic/func.slt.part b/e2e_test/batch/basic/func.slt.part
index ebcce5ceb6a47..b5c68f86d6236 100644
--- a/e2e_test/batch/basic/func.slt.part
+++ b/e2e_test/batch/basic/func.slt.part
@@ -317,6 +317,11 @@ select count(current_database());
----
1
+query I
+select count(current_catalog);
+----
+1
+
query T
select regexp_match('abc', 'bc');
----
diff --git a/e2e_test/batch/distribution_mode.slt b/e2e_test/batch/distribution_mode.slt
index c125000a3acd4..b680796277c13 100644
--- a/e2e_test/batch/distribution_mode.slt
+++ b/e2e_test/batch/distribution_mode.slt
@@ -4,7 +4,7 @@ SET RW_IMPLICIT_FLUSH TO true;
statement ok
SET QUERY_MODE TO distributed;
-include ./basic/*.slt.part
+include ./basic/**/*.slt.part
include ./duckdb/all.slt.part
include ./order/*.slt.part
include ./join/*.slt.part
diff --git a/e2e_test/batch/local_mode.slt b/e2e_test/batch/local_mode.slt
index c3818989443a7..68df9f0d91950 100644
--- a/e2e_test/batch/local_mode.slt
+++ b/e2e_test/batch/local_mode.slt
@@ -4,7 +4,7 @@ SET RW_IMPLICIT_FLUSH TO true;
statement ok
SET QUERY_MODE TO local;
-include ./basic/*.slt.part
+include ./basic/**/*.slt.part
include ./duckdb/all.slt.part
include ./order/*.slt.part
include ./join/*.slt.part
diff --git a/e2e_test/batch/types/list.slt.part b/e2e_test/batch/types/list.slt.part
deleted file mode 100644
index 031a466a5a3b2..0000000000000
--- a/e2e_test/batch/types/list.slt.part
+++ /dev/null
@@ -1,2 +0,0 @@
-# Test cases for list don't work for now as the parser cannot recognize the cast expression.
-# include list/*.slt.part
diff --git a/e2e_test/batch/types/map.slt.part b/e2e_test/batch/types/map.slt.part
new file mode 100644
index 0000000000000..bcdc92103e936
--- /dev/null
+++ b/e2e_test/batch/types/map.slt.part
@@ -0,0 +1,126 @@
+statement ok
+SET RW_IMPLICIT_FLUSH TO true;
+
+
+statement error
+create table t (m map (float, float));
+----
+db error: ERROR: Failed to run the query
+
+Caused by:
+ invalid map key type: double precision
+
+
+query error
+select map_from_entries(array[1.0,2.0,3.0], array[1,2,3]);
+----
+db error: ERROR: Failed to run the query
+
+Caused by these errors (recent errors listed first):
+ 1: Failed to bind expression: map_from_entries(ARRAY[1.0, 2.0, 3.0], ARRAY[1, 2, 3])
+ 2: Expr error
+ 3: invalid map key type: numeric
+
+
+query error
+select map_from_entries(array[1,1,3], array[1,2,3]);
+----
+db error: ERROR: Failed to run the query
+
+Caused by these errors (recent errors listed first):
+ 1: Expr error
+ 2: error while evaluating expression `map('{1,1,3}', '{1,2,3}')`
+ 3: map keys must be unique
+
+
+query ?
+select map_from_entries(array[1,2,3], array[1,null,3]);
+----
+{"1":1,"2":NULL,"3":3}
+
+
+query error
+select map_from_entries(array[1,null,3], array[1,2,3]);
+----
+db error: ERROR: Failed to run the query
+
+Caused by these errors (recent errors listed first):
+ 1: Expr error
+ 2: error while evaluating expression `map('{1,NULL,3}', '{1,2,3}')`
+ 3: map keys must not be NULL
+
+
+query error
+select map_from_entries(array[1,3], array[1,2,3]);
+----
+db error: ERROR: Failed to run the query
+
+Caused by these errors (recent errors listed first):
+ 1: Expr error
+ 2: error while evaluating expression `map('{1,3}', '{1,2,3}')`
+ 3: map keys and values have different length
+
+
+query error
+select map_from_entries(array[1,2], array[1,2]) = map_from_entries(array[2,1], array[2,1]);
+----
+db error: ERROR: Failed to run the query
+
+Caused by these errors (recent errors listed first):
+ 1: Failed to bind expression: map_from_entries(ARRAY[1, 2], ARRAY[1, 2]) = map_from_entries(ARRAY[2, 1], ARRAY[2, 1])
+ 2: function equal(map(integer,integer), map(integer,integer)) does not exist
+
+
+statement ok
+create table t (
+ m1 map(varchar, float),
+ m2 map(int, bool),
+ m3 map(varchar, map(varchar, varchar)),
+ l map(varchar,int)[],
+ s struct<m map(varchar, struct<x int>)>,
+);
+
+
+statement ok
+insert into t values (
+ map_from_entries(array['a','b','c'], array[1.0,2.0,3.0]::float[]),
+ map_from_entries(array[1,2,3], array[true,false,true]),
+ map_from_entries(array['a','b'],
+ array[
+ map_from_entries(array['a1'], array['a2']),
+ map_from_entries(array['b1'], array['b2'])
+ ]
+ ),
+ array[
+ map_from_entries(array['a','b','c'], array[1,2,3]),
+ map_from_entries(array['d','e','f'], array[4,5,6])
+ ],
+ row(
+ map_from_entries(array['a','b','c'], array[row(1),row(2),row(3)]::struct<x int>[])
+ )
+);
+
+# cast(map(character varying,integer)) -> map(character varying,double precision)
+query ?
+select map_from_entries(array['a','b','c'], array[1,2,3])::map(varchar,float);
+----
+{"a":1,"b":2,"c":3}
+
+
+statement ok
+insert into t(m1) values (map_from_entries(array['a','b','c'], array[1,2,3]));
+
+query ????? rowsort
+select * from t;
+----
+{"a":1,"b":2,"c":3} NULL NULL NULL NULL
+{"a":1,"b":2,"c":3} {"1":t,"2":f,"3":t} {"a":{"a1":a2},"b":{"b1":b2}} {"{\"a\":1,\"b\":2,\"c\":3}","{\"d\":4,\"e\":5,\"f\":6}"} ("{""a"":(1),""b"":(2),""c"":(3)}")
+
+query ????? rowsort
+select to_jsonb(m1), to_jsonb(m2), to_jsonb(m3), to_jsonb(l), to_jsonb(s) from t;
+----
+{"a": 1.0, "b": 2.0, "c": 3.0} null null null null
+{"a": 1.0, "b": 2.0, "c": 3.0} {"1": true, "2": false, "3": true} {"a": {"a1": "a2"}, "b": {"b1": "b2"}} [{"a": 1, "b": 2, "c": 3}, {"d": 4, "e": 5, "f": 6}] {"m": {"a": {"x": 1}, "b": {"x": 2}, "c": {"x": 3}}}
+
+statement ok
+drop table t;
diff --git a/e2e_test/batch/types/struct.slt.part b/e2e_test/batch/types/struct.slt.part
deleted file mode 100644
index 396881000d48a..0000000000000
--- a/e2e_test/batch/types/struct.slt.part
+++ /dev/null
@@ -1 +0,0 @@
-include struct/*.slt.part
diff --git a/e2e_test/s3/fs_source_batch.py b/e2e_test/s3/fs_source_batch.py
index d606be36f37f0..9f8da63533a68 100644
--- a/e2e_test/s3/fs_source_batch.py
+++ b/e2e_test/s3/fs_source_batch.py
@@ -109,6 +109,59 @@ def _assert_eq(field, got, expect):
cur.close()
conn.close()
+def test_empty_source(config, prefix, fmt):
+ conn = psycopg2.connect(
+ host="localhost",
+ port="4566",
+ user="root",
+ database="dev"
+ )
+
+ # Open a cursor to execute SQL statements
+ cur = conn.cursor()
+
+ def _source():
+ return f's3_test_empty_{fmt}'
+
+ def _encode():
+ if fmt == 'json':
+ return 'JSON'
+ else:
+ return f"CSV (delimiter = ',', without_header = {str('without' in fmt).lower()})"
+
+ # Execute a SELECT statement
+ cur.execute(f'''CREATE SOURCE {_source()}(
+ id int,
+ name TEXT,
+ sex int,
+ mark int,
+ ) WITH (
+ connector = 's3_v2',
+ match_pattern = '{prefix}*.{fmt}',
+ s3.region_name = '{config['S3_REGION']}',
+ s3.bucket_name = '{config['S3_BUCKET']}',
+ s3.credentials.access = '{config['S3_ACCESS_KEY']}',
+ s3.credentials.secret = '{config['S3_SECRET_KEY']}',
+ s3.endpoint_url = 'https://{config['S3_ENDPOINT']}'
+ ) FORMAT PLAIN ENCODE {_encode()};''')
+
+ stmt = f'select count(*), sum(id), sum(sex), sum(mark) from {_source()}'
+ print(f'Execute {stmt}')
+ cur.execute(stmt)
+ result = cur.fetchone()
+
+ print('Got:', result)
+
+ def _assert_eq(field, got, expect):
+ assert got == expect, f'{field} assertion failed: got {got}, expect {expect}.'
+
+ _assert_eq('count(*)', result[0], 0)
+
+ print('Empty source test pass')
+
+ cur.execute(f'drop source {_source()}')
+ cur.close()
+ conn.close()
if __name__ == "__main__":
FILE_NUM = 4001
@@ -153,3 +206,5 @@ def _assert_eq(field, got, expect):
# clean up s3 files
for idx, _ in enumerate(formatted_files):
client.remove_object(config["S3_BUCKET"], _s3(idx))
+
+ test_empty_source(config, run_id, fmt)
\ No newline at end of file
diff --git a/e2e_test/sink/kafka/protobuf.slt b/e2e_test/sink/kafka/protobuf.slt
index 61a91435567da..5f032ba32f8dc 100644
--- a/e2e_test/sink/kafka/protobuf.slt
+++ b/e2e_test/sink/kafka/protobuf.slt
@@ -28,6 +28,18 @@ format plain encode protobuf (
schema.registry = 'http://schemaregistry:8082',
message = 'test.package.MessageH.MessageI');
+system ok
+rpk topic create test-rw-sink-upsert-protobuf
+
+statement ok
+create table from_kafka_raw (kafka_value bytea)
+include key as kafka_key
+with (
+ connector = 'kafka',
+ topic = 'test-rw-sink-upsert-protobuf',
+ properties.bootstrap.server = 'message_queue:29092')
+format plain encode bytes;
+
statement ok
create table into_kafka (
bool_field bool,
@@ -84,6 +96,40 @@ format plain encode protobuf (
schema.registry = 'http://schemaregistry:8082',
message = 'test.package.MessageH.MessageI');
+statement error
+create sink sink_upsert from into_kafka with (
+ connector = 'kafka',
+ topic = 'test-rw-sink-upsert-protobuf',
+ properties.bootstrap.server = 'message_queue:29092',
+ primary_key = 'string_field')
+format upsert encode protobuf (
+ schema.location = 'file:///risingwave/proto-recursive',
+ message = 'recursive.AllTypes');
+----
+db error: ERROR: Failed to run the query
+
+Caused by these errors (recent errors listed first):
+ 1: gRPC request to meta service failed: Internal error
+ 2: failed to validate sink
+ 3: config error
+ 4: sink format/encode/key_encode unsupported: Upsert Protobuf None
+
+
+statement ok
+create sink sink_upsert from into_kafka with (
+ connector = 'kafka',
+ topic = 'test-rw-sink-upsert-protobuf',
+ properties.bootstrap.server = 'message_queue:29092',
+ primary_key = 'string_field')
+format upsert encode protobuf (
+ schema.location = 'file:///risingwave/proto-recursive',
+ message = 'recursive.AllTypes')
+key encode text;
+
+# Shall be ignored by force_append_only sinks but processed by upsert sinks.
+statement ok
+delete from into_kafka where bool_field;
+
sleep 2s
query TTTRRIIIIIITTTI
@@ -119,6 +165,11 @@ select field_i from from_kafka_csr_nested order by 1;
13
24
+query T
+select convert_from(kafka_key, 'utf-8') from from_kafka_raw where kafka_value is null;
+----
+Rising
+
statement error No such file
create sink sink_err from into_kafka with (
connector = 'kafka',
@@ -150,16 +201,19 @@ format plain encode protobuf (
message = 'recursive.AllTypes');
statement ok
-drop sink sink_csr_nested;
+drop table from_kafka cascade;
statement ok
-drop sink sink_csr_trivial;
+drop table from_kafka_csr_trivial cascade;
statement ok
-drop sink sink0;
+drop table from_kafka_csr_nested cascade;
statement ok
-drop table into_kafka;
+drop table from_kafka_raw cascade;
statement ok
-drop table from_kafka;
+drop table into_kafka cascade;
+
+system ok
+rpk topic delete test-rw-sink-upsert-protobuf
diff --git a/e2e_test/source/cdc_inline/alter/cdc_backfill_rate_limit.slt b/e2e_test/source/cdc_inline/alter/cdc_backfill_rate_limit.slt
new file mode 100644
index 0000000000000..ffc0fdfea102b
--- /dev/null
+++ b/e2e_test/source/cdc_inline/alter/cdc_backfill_rate_limit.slt
@@ -0,0 +1,78 @@
+control substitution on
+
+# mysql env vars will be read from the `.risingwave/config/risedev-env` file
+
+system ok
+mysql -e "
+ SET GLOBAL time_zone = '+00:00';
+"
+
+system ok
+mysql -e "
+ DROP DATABASE IF EXISTS testdb2;
+ CREATE DATABASE testdb2;
+ USE testdb2;
+ CREATE TABLE orders (
+ order_id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,
+ order_date DATETIME NOT NULL,
+ customer_name VARCHAR(255) NOT NULL,
+ price DECIMAL(10, 5) NOT NULL,
+ product_id INTEGER NOT NULL,
+ order_status BOOLEAN NOT NULL
+ ) AUTO_INCREMENT = 10001;
+ INSERT INTO orders
+ VALUES (default, '2020-07-30 10:08:22', 'Jark', 50.50, 102, false),
+ (default, '2020-07-30 10:11:09', 'Sally', 15.00, 105, false),
+ (default, '2020-07-30 12:00:30', 'Edward', 25.25, 106, false);
+"
+
+statement ok
+create source mysql_source with (
+ connector = 'mysql-cdc',
+ hostname = '${MYSQL_HOST}',
+ port = '${MYSQL_TCP_PORT}',
+ username = 'root',
+ password = '${MYSQL_PWD}',
+ database.name = 'testdb2',
+ server.id = '5185'
+);
+
+# backfill rate limit to zero
+statement ok
+set backfill_rate_limit=0;
+
+statement ok
+create table my_orders (
+ order_id int,
+ order_date timestamp,
+ customer_name string,
+ price decimal,
+ product_id int,
+ order_status smallint,
+ PRIMARY KEY (order_id)
+) from mysql_source table 'testdb2.orders';
+
+sleep 3s
+
+query I
+select count(*) from my_orders;
+----
+0
+
+# alter rate limit
+statement ok
+ALTER TABLE my_orders SET backfill_rate_limit = 1000;
+
+# wait alter ddl
+sleep 3s
+
+query I
+select count(*) from my_orders;
+----
+3
+
+statement ok
+drop table my_orders;
+
+statement ok
+drop source mysql_source cascade;
diff --git a/e2e_test/source/cdc_inline/sql_server_cdc/sql_server_cdc.slt b/e2e_test/source/cdc_inline/sql_server_cdc/sql_server_cdc.slt
index ec62c3d08adf4..57275043da202 100644
--- a/e2e_test/source/cdc_inline/sql_server_cdc/sql_server_cdc.slt
+++ b/e2e_test/source/cdc_inline/sql_server_cdc/sql_server_cdc.slt
@@ -5,9 +5,18 @@ control substitution on
system ok
sqlcmd -C -d master -Q 'create database mydb;' -b
+system ok
+sqlcmd -C -d master -Q 'create database UpperDB COLLATE SQL_Latin1_General_CP1_CS_AS;' -b
+
system ok
sqlcmd -C -i e2e_test/source/cdc_inline/sql_server_cdc/sql_server_cdc_prepare.sql -b
+system ok
+sqlcmd -C -d UpperDB -Q "CREATE SCHEMA UpperSchema;" -b
+
+system ok
+sqlcmd -C -d UpperDB -Q "EXEC sys.sp_cdc_enable_db; CREATE TABLE UpperSchema.UpperTable (ID INT PRIMARY KEY, Name VARCHAR(100)); EXEC sys.sp_cdc_enable_table @source_schema = 'UpperSchema', @source_name = 'UpperTable', @role_name = NULL; INSERT INTO UpperSchema.UpperTable VALUES (1, 'Alice');" -b
+
# ------------ validate stage ------------
# invalid address, comment this test out because it takes long to wait for TCP connection timeout.
@@ -114,6 +123,17 @@ CREATE TABLE sqlserver_all_data_types (
database.name = '${SQLCMDDBNAME}',
);
+# invalid dbname
+statement error does not match db_name
+CREATE SOURCE upper_mssql_source WITH (
+ connector = 'sqlserver-cdc',
+ hostname = '${SQLCMDSERVER:sqlserver-server}',
+ port = '${SQLCMDPORT:1433}',
+ username = '${SQLCMDUSER:SA}',
+ password = '${SQLCMDPASSWORD}',
+ database.name = 'upperdb',
+);
+
# ------------ Create source/table/mv stage ------------
# create a cdc source job, which format fixed to `FORMAT PLAIN ENCODE JSON`
statement ok
@@ -126,6 +146,16 @@ CREATE SOURCE mssql_source WITH (
database.name = '${SQLCMDDBNAME}',
);
+statement ok
+CREATE SOURCE upper_mssql_source WITH (
+ connector = 'sqlserver-cdc',
+ hostname = '${SQLCMDSERVER:sqlserver-server}',
+ port = '${SQLCMDPORT:1433}',
+ username = '${SQLCMDUSER:SA}',
+ password = '${SQLCMDPASSWORD}',
+ database.name = 'UpperDB',
+);
+
statement error Should not create MATERIALIZED VIEW or SELECT directly on shared CDC source
create materialized view mv as select * from mssql_source;
@@ -250,6 +280,34 @@ CREATE TABLE shared_sqlserver_all_data_types (
PRIMARY KEY (id)
) from mssql_source table 'dbo.sqlserver_all_data_types';
+statement error Sql Server table 'UpperSchema'.'UpperTable' doesn't exist in 'mydb'
+CREATE TABLE upper_table (
+ "ID" INT,
+ "Name" VARCHAR,
+ PRIMARY KEY ("ID")
+) from mssql_source table 'UpperSchema.UpperTable';
+
+statement error Column 'name' not found in the upstream database
+CREATE TABLE upper_table (
+ "ID" INT,
+ name VARCHAR,
+ PRIMARY KEY ("ID")
+) from upper_mssql_source table 'UpperSchema.UpperTable';
+
+statement error Sql Server table 'upperSchema'.'upperTable' doesn't exist in 'UpperDB'
+CREATE TABLE upper_table (
+ "ID" INT,
+ "Name" VARCHAR,
+ PRIMARY KEY ("ID")
+) from upper_mssql_source table 'upperSchema.upperTable';
+
+statement ok
+CREATE TABLE upper_table (
+ "ID" INT,
+ "Name" VARCHAR,
+ PRIMARY KEY ("ID")
+) from upper_mssql_source table 'UpperSchema.UpperTable';
+
statement ok
create materialized view shared_orders_cnt as select count(*) as cnt from shared_orders;
@@ -307,6 +365,9 @@ SELECT * from shared_sqlserver_all_data_types order by id;
system ok
sqlcmd -C -i e2e_test/source/cdc_inline/sql_server_cdc/sql_server_cdc_insert.sql -b
+system ok
+sqlcmd -C -d UpperDB -Q "INSERT INTO UpperSchema.UpperTable VALUES (11, 'Alice');" -b
+
sleep 10s
# ------------ recover cluster ------------
@@ -332,7 +393,6 @@ select cnt from shared_sqlserver_all_data_types_cnt;
----
6
-
query III
select * from shared_orders order by order_id;
----
@@ -359,6 +419,15 @@ SELECT * from shared_sqlserver_all_data_types order by id;
12 t 255 -32768 -2147483648 -9223372036854775808 -10 -10000 -10000 aa \xff 1990-01-01 13:59:59.123 2000-01-01 11:00:00.123 1990-01-01 00:00:01.123+00:00
13 t 127 32767 2147483647 9223372036854775807 -10 10000 10000 zzzz \xffffffff 2999-12-31 23:59:59.999 2099-12-31 23:59:59.999 2999-12-31 23:59:59.999+00:00
+query TT
+SELECT * from upper_table order by "ID";
+----
+1 Alice
+11 Alice
+
# ------------ drop stage ------------
+statement ok
+drop source upper_mssql_source cascade;
+
statement ok
drop source mssql_source cascade;
diff --git a/e2e_test/streaming/aggregate/two_phase_approx_percentile_merge_stateful_agg.slt b/e2e_test/streaming/aggregate/two_phase_approx_percentile_merge_stateful_agg.slt
new file mode 100644
index 0000000000000..012b1ffffb762
--- /dev/null
+++ b/e2e_test/streaming/aggregate/two_phase_approx_percentile_merge_stateful_agg.slt
@@ -0,0 +1,80 @@
+# Single phase approx percentile
+statement ok
+create table t(p_col double, grp_col int);
+
+statement ok
+insert into t select a, 1 from generate_series(-1000, 1000) t(a);
+
+statement ok
+flush;
+
+query I
+select
+ percentile_cont(0.01) within group (order by p_col) as p01,
+ min(p_col),
+ percentile_cont(0.5) within group (order by p_col) as p50,
+ count(*),
+ percentile_cont(0.99) within group (order by p_col) as p99
+from t;
+----
+-980 -1000 0 2001 980
+
+statement ok
+create materialized view m1 as
+ select
+ approx_percentile(0.01, 0.01) within group (order by p_col) as p01,
+ min(p_col),
+ approx_percentile(0.5, 0.01) within group (order by p_col) as p50,
+ count(*),
+ approx_percentile(0.99, 0.01) within group (order by p_col) as p99
+ from t;
+
+query I
+select * from m1;
+----
+-982.5779489474152 -1000 0 2001 982.5779489474152
+
+# Test state encode / decode
+onlyif can-use-recover
+statement ok
+recover;
+
+onlyif can-use-recover
+sleep 10s
+
+query I
+select * from m1;
+----
+-982.5779489474152 -1000 0 2001 982.5779489474152
+
+# Test 0 1000)
// ------------------------
// Internal functions
diff --git a/proto/meta.proto b/proto/meta.proto
index 0371b5540a6da..bcb6c331549f2 100644
--- a/proto/meta.proto
+++ b/proto/meta.proto
@@ -273,6 +273,7 @@ enum ThrottleTarget {
SOURCE = 1;
MV = 2;
TABLE_WITH_SOURCE = 3;
+ CDC_TABLE = 4;
}
message ApplyThrottleRequest {
diff --git a/proto/stream_service.proto b/proto/stream_service.proto
index 08f0ff1e7684f..ef49d4902a14a 100644
--- a/proto/stream_service.proto
+++ b/proto/stream_service.proto
@@ -101,7 +101,7 @@ message WaitEpochCommitResponse {
message StreamingControlStreamRequest {
message InitRequest {
- uint64 prev_epoch = 2;
+ uint64 version_id = 1;
}
message RemovePartialGraphRequest {
diff --git a/risedev.yml b/risedev.yml
index db8f6fe5600e2..3c7f8e0e09be4 100644
--- a/risedev.yml
+++ b/risedev.yml
@@ -477,8 +477,8 @@ profile:
parallelism: 8
- use: frontend
- use: compactor
- - use: prometheus
- - use: grafana
+ # - use: prometheus
+ # - use: grafana
# Do not use kafka here, we will spawn it separately,
# so we don't have to re-generate data each time.
# RW will still be able to talk to it.
diff --git a/src/batch/Cargo.toml b/src/batch/Cargo.toml
index 099ae9019afcf..403eb864229d3 100644
--- a/src/batch/Cargo.toml
+++ b/src/batch/Cargo.toml
@@ -63,7 +63,7 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [
"fs",
] }
tokio-metrics = "0.3.0"
-tokio-stream = "0.1"
+tokio-stream = { workspace = true }
tokio-util = { workspace = true }
tonic = { workspace = true }
tracing = "0.1"
diff --git a/src/batch/src/executor/hash_agg.rs b/src/batch/src/executor/hash_agg.rs
index d69d4fbc8b174..00073217f7ead 100644
--- a/src/batch/src/executor/hash_agg.rs
+++ b/src/batch/src/executor/hash_agg.rs
@@ -20,7 +20,6 @@ use bytes::Bytes;
use futures_async_stream::try_stream;
use hashbrown::hash_map::Entry;
use itertools::Itertools;
-use prost::Message;
use risingwave_common::array::{DataChunk, StreamChunk};
use risingwave_common::bitmap::Bitmap;
use risingwave_common::catalog::{Field, Schema};
@@ -35,6 +34,7 @@ use risingwave_expr::aggregate::{AggCall, AggregateState, BoxedAggregateFunction
use risingwave_pb::batch_plan::plan_node::NodeBody;
use risingwave_pb::batch_plan::HashAggNode;
use risingwave_pb::data::DataChunk as PbDataChunk;
+use risingwave_pb::Message;
use crate::error::{BatchError, Result};
use crate::executor::aggregation::build as build_agg;
diff --git a/src/batch/src/executor/join/distributed_lookup_join.rs b/src/batch/src/executor/join/distributed_lookup_join.rs
index f5ad5ab5ed984..1068ffd7f3349 100644
--- a/src/batch/src/executor/join/distributed_lookup_join.rs
+++ b/src/batch/src/executor/join/distributed_lookup_join.rs
@@ -354,10 +354,7 @@ impl LookupExecutorBuilder for InnerSideExecutorBuilder {
let pk_prefix = OwnedRow::new(scan_range.eq_conds);
if self.lookup_prefix_len == self.table.pk_indices().len() {
- let row = self
- .table
- .get_row(&pk_prefix, self.epoch.clone().into())
- .await?;
+ let row = self.table.get_row(&pk_prefix, self.epoch.into()).await?;
if let Some(row) = row {
self.row_list.push(row);
@@ -366,7 +363,7 @@ impl LookupExecutorBuilder for InnerSideExecutorBuilder {
let iter = self
.table
.batch_iter_with_pk_bounds(
- self.epoch.clone().into(),
+ self.epoch.into(),
&pk_prefix,
..,
false,
diff --git a/src/batch/src/executor/join/hash_join.rs b/src/batch/src/executor/join/hash_join.rs
index 026f03fb65deb..3bfb583d6459d 100644
--- a/src/batch/src/executor/join/hash_join.rs
+++ b/src/batch/src/executor/join/hash_join.rs
@@ -20,7 +20,6 @@ use std::sync::Arc;
use bytes::Bytes;
use futures_async_stream::try_stream;
use itertools::Itertools;
-use prost::Message;
use risingwave_common::array::{Array, DataChunk, RowRef};
use risingwave_common::bitmap::{Bitmap, BitmapBuilder};
use risingwave_common::catalog::Schema;
@@ -34,6 +33,7 @@ use risingwave_common_estimate_size::EstimateSize;
use risingwave_expr::expr::{build_from_prost, BoxedExpression, Expression};
use risingwave_pb::batch_plan::plan_node::NodeBody;
use risingwave_pb::data::DataChunk as PbDataChunk;
+use risingwave_pb::Message;
use super::{ChunkedData, JoinType, RowId};
use crate::error::{BatchError, Result};
diff --git a/src/batch/src/executor/join/local_lookup_join.rs b/src/batch/src/executor/join/local_lookup_join.rs
index 7fcaba71a9c3b..a3be00fc39a22 100644
--- a/src/batch/src/executor/join/local_lookup_join.rs
+++ b/src/batch/src/executor/join/local_lookup_join.rs
@@ -134,7 +134,7 @@ impl InnerSideExecutorBuilder {
..Default::default()
}),
}),
- epoch: Some(self.epoch.clone()),
+ epoch: Some(self.epoch),
tracing_context: TracingContext::from_current_span().to_protobuf(),
};
@@ -237,7 +237,7 @@ impl LookupExecutorBuilder for InnerSideExecutorBuilder
&plan_node,
&task_id,
self.context.clone(),
- self.epoch.clone(),
+ self.epoch,
self.shutdown_rx.clone(),
);
diff --git a/src/batch/src/executor/mod.rs b/src/batch/src/executor/mod.rs
index 3a64901c64a04..80dc57b4f3620 100644
--- a/src/batch/src/executor/mod.rs
+++ b/src/batch/src/executor/mod.rs
@@ -174,7 +174,7 @@ impl<'a, C: Clone> ExecutorBuilder<'a, C> {
plan_node,
self.task_id,
self.context.clone(),
- self.epoch.clone(),
+ self.epoch,
self.shutdown_rx.clone(),
)
}
@@ -188,7 +188,7 @@ impl<'a, C: Clone> ExecutorBuilder<'a, C> {
}
pub fn epoch(&self) -> BatchQueryEpoch {
- self.epoch.clone()
+ self.epoch
}
}
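The recurring `self.epoch.clone()` → `self.epoch` edits in this and the neighbouring executors suggest the epoch type is now `Copy`, so it can be passed and returned by value. A small self-contained sketch of the effect, using a stand-in type rather than the real generated struct:

```rust
// Stand-in for the generated epoch type; assuming it now derives Copy.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct BatchQueryEpoch(u64);

struct ExecutorBuilder {
    epoch: BatchQueryEpoch,
}

impl ExecutorBuilder {
    // Returning by value needs no `.clone()` once the type is Copy.
    fn epoch(&self) -> BatchQueryEpoch {
        self.epoch
    }
}

fn main() {
    let builder = ExecutorBuilder { epoch: BatchQueryEpoch(42) };
    assert_eq!(builder.epoch(), builder.epoch()); // builder keeps its own copy
}
```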
diff --git a/src/batch/src/executor/order_by.rs b/src/batch/src/executor/order_by.rs
index 3f8c8e106c78f..ad7cc13992346 100644
--- a/src/batch/src/executor/order_by.rs
+++ b/src/batch/src/executor/order_by.rs
@@ -17,7 +17,6 @@ use std::sync::Arc;
use bytes::Bytes;
use futures_async_stream::try_stream;
use itertools::Itertools;
-use prost::Message;
use risingwave_common::array::DataChunk;
use risingwave_common::catalog::Schema;
use risingwave_common::memory::MemoryContext;
@@ -28,6 +27,7 @@ use risingwave_common::util::sort_util::ColumnOrder;
use risingwave_common_estimate_size::EstimateSize;
use risingwave_pb::batch_plan::plan_node::NodeBody;
use risingwave_pb::data::DataChunk as PbDataChunk;
+use risingwave_pb::Message;
use super::{
BoxedDataChunkStream, BoxedExecutor, BoxedExecutorBuilder, Executor, ExecutorBuilder,
diff --git a/src/batch/src/executor/row_seq_scan.rs b/src/batch/src/executor/row_seq_scan.rs
index b8287147c6750..b897dbd813787 100644
--- a/src/batch/src/executor/row_seq_scan.rs
+++ b/src/batch/src/executor/row_seq_scan.rs
@@ -237,7 +237,7 @@ impl BoxedExecutorBuilder for RowSeqScanExecutorBuilder {
let ordered = seq_scan_node.ordered;
- let epoch = source.epoch.clone();
+ let epoch = source.epoch;
let limit = seq_scan_node.limit;
let as_of = seq_scan_node
.as_of
@@ -341,8 +341,7 @@ impl RowSeqScanExecutor {
for point_get in point_gets {
let table = table.clone();
if let Some(row) =
- Self::execute_point_get(table, point_get, query_epoch.clone(), histogram.clone())
- .await?
+ Self::execute_point_get(table, point_get, query_epoch, histogram.clone()).await?
{
if let Some(chunk) = data_chunk_builder.append_one_row(row) {
returned += chunk.cardinality() as u64;
@@ -373,7 +372,7 @@ impl RowSeqScanExecutor {
table.clone(),
range,
ordered,
- query_epoch.clone(),
+ query_epoch,
chunk_size,
limit,
histogram.clone(),
diff --git a/src/batch/src/spill/spill_op.rs b/src/batch/src/spill/spill_op.rs
index 237ee3baf0099..b3e842a269ec7 100644
--- a/src/batch/src/spill/spill_op.rs
+++ b/src/batch/src/spill/spill_op.rs
@@ -22,9 +22,9 @@ use futures_util::AsyncReadExt;
use opendal::layers::RetryLayer;
use opendal::services::{Fs, Memory};
use opendal::Operator;
-use prost::Message;
use risingwave_common::array::DataChunk;
use risingwave_pb::data::DataChunk as PbDataChunk;
+use risingwave_pb::Message;
use thiserror_ext::AsReport;
use tokio::sync::Mutex;
use twox_hash::XxHash64;
diff --git a/src/batch/src/task/broadcast_channel.rs b/src/batch/src/task/broadcast_channel.rs
index d66eda7d7d620..9781e38e7d7f6 100644
--- a/src/batch/src/task/broadcast_channel.rs
+++ b/src/batch/src/task/broadcast_channel.rs
@@ -86,7 +86,7 @@ pub fn new_broadcast_channel(
output_channel_size: usize,
) -> (ChanSenderImpl, Vec<ChanReceiverImpl>) {
let broadcast_info = match shuffle.distribution {
- Some(exchange_info::Distribution::BroadcastInfo(ref v)) => v.clone(),
+ Some(exchange_info::Distribution::BroadcastInfo(ref v)) => *v,
_ => BroadcastInfo::default(),
};
diff --git a/src/batch/src/task/task_execution.rs b/src/batch/src/task/task_execution.rs
index 4536dad1c031f..7186ced55febd 100644
--- a/src/batch/src/task/task_execution.rs
+++ b/src/batch/src/task/task_execution.rs
@@ -393,7 +393,7 @@ impl BatchTaskExecution {
self.plan.root.as_ref().unwrap(),
&self.task_id,
self.context.clone(),
- self.epoch.clone(),
+ self.epoch,
self.shutdown_rx.clone(),
)
.build(),
diff --git a/src/bench/Cargo.toml b/src/bench/Cargo.toml
index d451ef46ef838..43451ebaeb9d1 100644
--- a/src/bench/Cargo.toml
+++ b/src/bench/Cargo.toml
@@ -50,7 +50,7 @@ tokio = { version = "0.2", package = "madsim-tokio", features = [
"time",
"signal",
] }
-tokio-stream = "0.1"
+tokio-stream = { workspace = true }
toml = "0.8"
tracing = "0.1"
tracing-subscriber = "0.3.17"
diff --git a/src/cmd_all/src/standalone.rs b/src/cmd_all/src/standalone.rs
index ceb890f4cb3af..27c8c40203397 100644
--- a/src/cmd_all/src/standalone.rs
+++ b/src/cmd_all/src/standalone.rs
@@ -467,6 +467,7 @@ mod test {
heap_profiling_dir: None,
dangerous_max_idle_secs: None,
connector_rpc_endpoint: None,
+ license_key: None,
temp_secret_file_dir: "./meta/secrets/",
},
),
diff --git a/src/common/Cargo.toml b/src/common/Cargo.toml
index a117dce645ae6..2cc1d81f1a38d 100644
--- a/src/common/Cargo.toml
+++ b/src/common/Cargo.toml
@@ -55,7 +55,7 @@ futures = { version = "0.3", default-features = false, features = ["alloc"] }
governor = { version = "0.6", default-features = false, features = ["std"] }
hashbrown = "0.14"
hex = "0.4.3"
-http = "0.2"
+http = "1"
humantime = "2.1"
hytra = { workspace = true }
itertools = { workspace = true }
diff --git a/src/common/common_service/Cargo.toml b/src/common/common_service/Cargo.toml
index cb43702f3f9e6..87206ab7cbc1d 100644
--- a/src/common/common_service/Cargo.toml
+++ b/src/common/common_service/Cargo.toml
@@ -18,7 +18,7 @@ normal = ["workspace-hack"]
async-trait = "0.1"
axum = { workspace = true }
futures = { version = "0.3", default-features = false, features = ["alloc"] }
-hyper = "0.14" # required by tonic
+http = "1"
prometheus = { version = "0.13" }
risingwave_common = { workspace = true }
risingwave_pb = { workspace = true }
diff --git a/src/common/common_service/src/tracing.rs b/src/common/common_service/src/tracing.rs
index 3ee4a64231c29..de6f43bbf33f3 100644
--- a/src/common/common_service/src/tracing.rs
+++ b/src/common/common_service/src/tracing.rs
@@ -15,8 +15,8 @@
use std::task::{Context, Poll};
use futures::Future;
-use hyper::Body;
use risingwave_common::util::tracing::TracingContext;
+use tonic::body::BoxBody;
use tower::{Layer, Service};
use tracing::Instrument;
@@ -49,9 +49,9 @@ pub struct TracingExtract {
inner: S,
}
-impl<S> Service<hyper::Request<Body>> for TracingExtract<S>
+impl<S> Service<http::Request<BoxBody>> for TracingExtract<S>
where
- S: Service<hyper::Request<Body>> + Clone + Send + 'static,
+ S: Service<http::Request<BoxBody>> + Clone + Send + 'static,
S::Future: Send + 'static,
{
type Error = S::Error;
@@ -63,7 +63,7 @@ where
self.inner.poll_ready(cx)
}
- fn call(&mut self, req: hyper::Request<Body>) -> Self::Future {
+ fn call(&mut self, req: http::Request<BoxBody>) -> Self::Future {
// This is necessary because tonic internally uses `tower::buffer::Buffer`.
// See https://github.com/tower-rs/tower/issues/547#issuecomment-767629149
// for details on why this is necessary
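For reference, the middleware shape being migrated here is an ordinary tower `Service` wrapper, now generic over `http` 1.x request types; the clone-and-swap in `call` is the usual workaround for the `tower::buffer::Buffer` issue mentioned in the comment above. A generic sketch, not the crate's actual implementation:

```rust
use std::task::{Context, Poll};
use tower::Service;

/// Illustrative pass-through middleware over `http` 1.x requests.
#[derive(Clone)]
struct PassThrough<S> {
    inner: S,
}

impl<S, B> Service<http::Request<B>> for PassThrough<S>
where
    S: Service<http::Request<B>> + Clone,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, req: http::Request<B>) -> Self::Future {
        // Swap in a fresh clone and call the instance that was driven to readiness,
        // so readiness is never "stolen" when the service sits behind a Buffer.
        let clone = self.inner.clone();
        let mut inner = std::mem::replace(&mut self.inner, clone);
        inner.call(req)
    }
}
```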
diff --git a/src/common/metrics/Cargo.toml b/src/common/metrics/Cargo.toml
index 4f3e8b20936b2..0c32b557cebb2 100644
--- a/src/common/metrics/Cargo.toml
+++ b/src/common/metrics/Cargo.toml
@@ -15,12 +15,16 @@ ignored = ["workspace-hack"]
normal = ["workspace-hack"]
[dependencies]
+auto_impl = "1"
bytes = "1"
clap = { workspace = true }
easy-ext = "1"
futures = { version = "0.3", default-features = false, features = ["alloc"] }
-http = "0.2"
-hyper = { version = "0.14", features = ["client"] } # used by tonic
+http = "1"
+http-02 = { package = "http", version = "0.2" }
+hyper = { version = "1" }
+hyper-014 = { package = "hyper", version = "0.14" }
+hyper-util = { version = "0.1", features = ["client-legacy"] }
hytra = { workspace = true }
itertools = { workspace = true }
parking_lot = { workspace = true }
@@ -32,13 +36,13 @@ serde = { version = "1", features = ["derive"] }
thiserror-ext = { workspace = true }
tokio = { version = "0.2", package = "madsim-tokio" }
tonic = { workspace = true }
+tower-layer = "0.3.2"
+tower-service = "0.3.2"
tracing = "0.1"
tracing-subscriber = "0.3.17"
[target.'cfg(not(madsim))'.dependencies]
-http-body = "0.4.5"
-tower-layer = "0.3.2"
-tower-service = "0.3.2"
+http-body = "1"
[target.'cfg(target_os = "linux")'.dependencies]
procfs = { version = "0.16", default-features = false }
libc = "0.2"
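The paired entries above (`http` 1 next to `http-02`, `hyper` 1 next to `hyper-014`) rely on Cargo's dependency renaming so that two major versions coexist during the migration; in Rust code the old major is then reached under the renamed crate (hyphens become underscores). A tiny sketch of the consuming side, assuming exactly those renames:

```rust
use http::Uri;              // http 1.x under its default name
use http_02::Uri as Uri02;  // http 0.2.x via `http-02 = { package = "http", version = "0.2" }`

fn main() {
    let new: Uri = "https://example.com/".parse().unwrap();
    let old: Uri02 = "https://example.com/".parse().unwrap();
    println!("{new} vs {old}");
}
```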
diff --git a/src/common/metrics/src/monitor/connection.rs b/src/common/metrics/src/monitor/connection.rs
index e5774a3f16d7d..aa7c8c8d4baa3 100644
--- a/src/common/metrics/src/monitor/connection.rs
+++ b/src/common/metrics/src/monitor/connection.rs
@@ -24,10 +24,9 @@ use std::time::Duration;
use futures::FutureExt;
use http::Uri;
-use hyper::client::connect::dns::{GaiAddrs, GaiFuture, GaiResolver, Name};
-use hyper::client::connect::Connection;
-use hyper::client::HttpConnector;
-use hyper::service::Service;
+use hyper_util::client::legacy::connect::dns::{GaiAddrs, GaiFuture, GaiResolver, Name};
+use hyper_util::client::legacy::connect::{Connected, Connection, HttpConnector};
+use hyper_util::rt::TokioIo;
use itertools::Itertools;
use pin_project_lite::pin_project;
use prometheus::{
@@ -37,11 +36,13 @@ use prometheus::{
use thiserror_ext::AsReport;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tonic::transport::{Channel, Endpoint};
+use tower_service::Service;
use tracing::{debug, info, warn};
use crate::monitor::GLOBAL_METRICS_REGISTRY;
use crate::{register_guarded_int_counter_vec_with_registry, LabelGuardedIntCounterVec};
+#[auto_impl::auto_impl(&mut)]
pub trait MonitorAsyncReadWrite {
fn on_read(&mut self, _size: usize) {}
fn on_eof(&mut self) {}
@@ -74,6 +75,14 @@ impl<C, M> MonitoredConnection<C, M> {
let this = this.project();
(this.inner, this.monitor)
}
+
+ /// Delegate async read/write traits between tokio and hyper.
+ fn hyper_tokio_delegate(
+ self: Pin<&mut Self>,
+ ) -> TokioIo<MonitoredConnection<TokioIo<Pin<&mut C>>, &mut M>> {
+ let (inner, monitor) = MonitoredConnection::project_into(self);
+ TokioIo::new(MonitoredConnection::new(TokioIo::new(inner), monitor))
+ }
}
impl<C: AsyncRead, M: MonitorAsyncReadWrite> AsyncRead for MonitoredConnection<C, M> {
@@ -112,6 +121,16 @@ impl<C: AsyncRead, M: MonitorAsyncReadWrite> AsyncRead for MonitoredConnection<C, M> {
+impl<C: AsyncRead, M: MonitorAsyncReadWrite> hyper::rt::Read for MonitoredConnection<C, M> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: hyper::rt::ReadBufCursor<'_>,
+ ) -> Poll<std::io::Result<()>> {
+ hyper::rt::Read::poll_read(std::pin::pin!(self.hyper_tokio_delegate()), cx, buf)
+ }
+}
+
impl<C: AsyncWrite, M: MonitorAsyncReadWrite> AsyncWrite for MonitoredConnection<C, M> {
fn poll_write(
self: Pin<&mut Self>,
@@ -186,8 +205,41 @@ impl<C: AsyncWrite, M: MonitorAsyncReadWrite> AsyncWrite for MonitoredConnection
}
}
+impl<C: AsyncWrite, M: MonitorAsyncReadWrite> hyper::rt::Write for MonitoredConnection<C, M> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ hyper::rt::Write::poll_write(std::pin::pin!(self.hyper_tokio_delegate()), cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ hyper::rt::Write::poll_flush(std::pin::pin!(self.hyper_tokio_delegate()), cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ hyper::rt::Write::poll_shutdown(std::pin::pin!(self.hyper_tokio_delegate()), cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.inner.is_write_vectored()
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ hyper::rt::Write::poll_write_vectored(std::pin::pin!(self.hyper_tokio_delegate()), cx, bufs)
+ }
+}
+
impl<C: Connection, M> Connection for MonitoredConnection<C, M> {
- fn connected(&self) -> hyper::client::connect::Connected {
+ fn connected(&self) -> Connected {
self.inner.connected()
}
}
@@ -275,6 +327,58 @@ where
}
}
+// Compatibility implementation for hyper 0.14 ecosystem.
+// Should be the same as those with imports from `http::Uri` and `hyper_util::client::legacy`.
+// TODO(http-bump): remove this after there is no more dependency on hyper 0.14.
+mod compat {
+ use http_02::Uri;
+ use hyper_014::client::connect::{Connected, Connection};
+
+ use super::*;
+
+ impl<C: Service<Uri>, M: MonitorNewConnection + Clone + 'static> Service<Uri>
+ for MonitoredConnection<C, M>
+ where
+ C::Future: 'static,
+ {
+ type Error = C::Error;
+ type Response = MonitoredConnection<C::Response, M::ConnectionMonitor>;
+
+ type Future = impl Future